2024-11-19 18:27:57,389 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 18:27:57,399 main DEBUG Took 0.009032 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 18:27:57,400 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 18:27:57,400 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 18:27:57,401 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 18:27:57,402 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,408 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 18:27:57,420 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,421 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,422 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,422 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,422 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,422 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,423 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,424 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,424 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,424 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,425 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,425 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,426 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,426 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 18:27:57,426 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,427 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,427 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,427 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,428 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,428 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,428 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,428 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,429 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,429 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 18:27:57,430 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,430 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 18:27:57,431 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 18:27:57,432 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 18:27:57,434 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 18:27:57,434 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-19 18:27:57,435 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 18:27:57,436 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 18:27:57,444 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 18:27:57,446 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 18:27:57,447 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 18:27:57,448 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 18:27:57,448 main DEBUG createAppenders(={Console}) 2024-11-19 18:27:57,449 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-19 18:27:57,449 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 18:27:57,449 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-19 18:27:57,450 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 18:27:57,450 main DEBUG OutputStream closed 2024-11-19 18:27:57,450 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 18:27:57,450 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 18:27:57,451 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-19 18:27:57,515 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 18:27:57,517 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 18:27:57,518 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 18:27:57,518 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 18:27:57,519 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 18:27:57,519 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 18:27:57,520 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 18:27:57,520 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 18:27:57,520 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 18:27:57,520 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 18:27:57,521 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 18:27:57,521 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 18:27:57,521 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 18:27:57,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 18:27:57,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 18:27:57,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 18:27:57,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 18:27:57,523 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 18:27:57,525 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 18:27:57,525 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-19 18:27:57,526 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 18:27:57,526 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-19T18:27:57,812 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7 2024-11-19 18:27:57,815 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 18:27:57,815 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-19T18:27:57,825 INFO  [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-19T18:27:57,857 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=156, ProcessCount=11, AvailableMemoryMB=7404
2024-11-19T18:27:57,860 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-19T18:27:57,874 INFO  [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd, deleteOnExit=true
2024-11-19T18:27:57,875 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-19T18:27:57,876 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/test.cache.data in system properties and HBase conf
2024-11-19T18:27:57,876 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.tmp.dir in system properties and HBase conf
2024-11-19T18:27:57,877 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir in system properties and HBase conf
2024-11-19T18:27:57,877 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-19T18:27:57,878 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-19T18:27:57,878 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-19T18:27:57,962 WARN  [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-19T18:28:58,050 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-11-19T18:27:58,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:27:58,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:27:58,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:27:58,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:27:58,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:27:58,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:27:58,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:27:58,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:27:58,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:27:58,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:27:58,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:27:58,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:27:58,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:27:58,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:27:58,538 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:27:58,891 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T18:27:58,968 INFO [Time-limited test {}] log.Log(170): Logging initialized @2350ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T18:27:59,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:27:59,106 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:27:59,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:27:59,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:27:59,127 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:27:59,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:27:59,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:27:59,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:27:59,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/java.io.tmpdir/jetty-localhost-41577-hadoop-hdfs-3_4_1-tests_jar-_-any-9065732446330126687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:27:59,344 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:41577} 2024-11-19T18:27:59,344 INFO [Time-limited test {}] server.Server(415): Started @2726ms 2024-11-19T18:27:59,377 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:27:59,723 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:27:59,729 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:27:59,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:27:59,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:27:59,732 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:27:59,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:27:59,733 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:27:59,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ca8488f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/java.io.tmpdir/jetty-localhost-44273-hadoop-hdfs-3_4_1-tests_jar-_-any-906713478638417605/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:27:59,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:44273} 2024-11-19T18:27:59,856 INFO [Time-limited test {}] server.Server(415): Started @3238ms 2024-11-19T18:27:59,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:28:00,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:28:00,040 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:28:00,046 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:28:00,046 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:28:00,046 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:28:00,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:28:00,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:28:00,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ca1952e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/java.io.tmpdir/jetty-localhost-33507-hadoop-hdfs-3_4_1-tests_jar-_-any-17223342277884378657/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:28:00,171 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:33507} 2024-11-19T18:28:00,171 INFO [Time-limited test {}] server.Server(415): Started @3554ms 2024-11-19T18:28:00,174 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T18:28:00,366 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data4/current/BP-342907355-172.17.0.2-1732040878628/current, will proceed with Du for space computation calculation, 2024-11-19T18:28:00,366 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data3/current/BP-342907355-172.17.0.2-1732040878628/current, will proceed with Du for space computation calculation, 2024-11-19T18:28:00,366 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data1/current/BP-342907355-172.17.0.2-1732040878628/current, will proceed with Du for space computation calculation, 2024-11-19T18:28:00,366 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data2/current/BP-342907355-172.17.0.2-1732040878628/current, will proceed with Du for space computation calculation, 2024-11-19T18:28:00,414 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:28:00,421 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:28:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12b86e674290c464 with lease ID 0xa79e55b8607e1e21: Processing first storage report for DS-255e42af-274e-4796-be06-863aaad1d285 from datanode DatanodeRegistration(127.0.0.1:34527, datanodeUuid=68bc039d-f2d9-424a-8e87-ba7b1931e499, infoPort=33663, infoSecurePort=0, ipcPort=45089, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628) 2024-11-19T18:28:00,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12b86e674290c464 with lease ID 0xa79e55b8607e1e21: from storage DS-255e42af-274e-4796-be06-863aaad1d285 node DatanodeRegistration(127.0.0.1:34527, datanodeUuid=68bc039d-f2d9-424a-8e87-ba7b1931e499, infoPort=33663, infoSecurePort=0, ipcPort=45089, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-19T18:28:00,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c39fcec168df453 with lease ID 0xa79e55b8607e1e22: Processing first storage report for DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350 from datanode DatanodeRegistration(127.0.0.1:45723, datanodeUuid=624bf150-708b-42e5-a448-0cf2d7b3e7a6, infoPort=39983, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628) 2024-11-19T18:28:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c39fcec168df453 with lease ID 0xa79e55b8607e1e22: from storage DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350 node DatanodeRegistration(127.0.0.1:45723, datanodeUuid=624bf150-708b-42e5-a448-0cf2d7b3e7a6, infoPort=39983, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:28:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12b86e674290c464 with lease ID 0xa79e55b8607e1e21: Processing first storage report for DS-89a4846d-539f-4428-8db7-09323ccf31fd from datanode DatanodeRegistration(127.0.0.1:34527, datanodeUuid=68bc039d-f2d9-424a-8e87-ba7b1931e499, infoPort=33663, infoSecurePort=0, ipcPort=45089, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628) 2024-11-19T18:28:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12b86e674290c464 with lease ID 0xa79e55b8607e1e21: from storage DS-89a4846d-539f-4428-8db7-09323ccf31fd node DatanodeRegistration(127.0.0.1:34527, datanodeUuid=68bc039d-f2d9-424a-8e87-ba7b1931e499, infoPort=33663, infoSecurePort=0, ipcPort=45089, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:28:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c39fcec168df453 with lease ID 0xa79e55b8607e1e22: Processing first storage report for DS-2abffa58-bf35-4396-a0e8-ffa2ad656ba3 from datanode DatanodeRegistration(127.0.0.1:45723, datanodeUuid=624bf150-708b-42e5-a448-0cf2d7b3e7a6, infoPort=39983, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628) 2024-11-19T18:28:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x4c39fcec168df453 with lease ID 0xa79e55b8607e1e22: from storage DS-2abffa58-bf35-4396-a0e8-ffa2ad656ba3 node DatanodeRegistration(127.0.0.1:45723, datanodeUuid=624bf150-708b-42e5-a448-0cf2d7b3e7a6, infoPort=39983, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=1608281333;c=1732040878628), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T18:28:00,568 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7
2024-11-19T18:28:00,643 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/zookeeper_0, clientPort=64665, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-19T18:28:00,653 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64665
2024-11-19T18:28:00,663 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T18:28:00,666 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T18:28:00,892 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741825_1001 (size=7)
2024-11-19T18:28:00,894 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741825_1001 (size=7)
2024-11-19T18:28:01,298 INFO  [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198 with version=8
2024-11-19T18:28:01,299 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging
2024-11-19T18:28:01,388 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-19T18:28:01,636 INFO  [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45
2024-11-19T18:28:01,647 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T18:28:01,648 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:28:01,652 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:28:01,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:28:01,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:28:01,802 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:28:01,865 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T18:28:01,873 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T18:28:01,877 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:28:01,904 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20383 (auto-detected) 2024-11-19T18:28:01,905 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T18:28:01,925 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46305 2024-11-19T18:28:01,953 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46305 connecting to ZooKeeper ensemble=127.0.0.1:64665 2024-11-19T18:28:01,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463050x0, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:28:01,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46305-0x101317c5c810000 connected 2024-11-19T18:28:02,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:02,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:02,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:28:02,047 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198, hbase.cluster.distributed=false 2024-11-19T18:28:02,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:28:02,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46305 2024-11-19T18:28:02,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46305 2024-11-19T18:28:02,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46305 2024-11-19T18:28:02,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46305 2024-11-19T18:28:02,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46305 2024-11-19T18:28:02,193 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:28:02,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:28:02,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:28:02,196 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:28:02,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:28:02,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:28:02,199 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:28:02,201 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:28:02,202 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42803 2024-11-19T18:28:02,204 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42803 connecting to ZooKeeper ensemble=127.0.0.1:64665 2024-11-19T18:28:02,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:02,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:02,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428030x0, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:28:02,218 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:28:02,218 
DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42803-0x101317c5c810001 connected 2024-11-19T18:28:02,222 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:28:02,230 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:28:02,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:28:02,238 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:28:02,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42803 2024-11-19T18:28:02,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42803 2024-11-19T18:28:02,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42803 2024-11-19T18:28:02,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42803 2024-11-19T18:28:02,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42803 2024-11-19T18:28:02,257 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:46305 2024-11-19T18:28:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:28:02,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:28:02,267 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:28:02,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-19T18:28:02,291 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:28:02,292 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,46305,1732040881439 from backup master directory 2024-11-19T18:28:02,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:28:02,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:28:02,298 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:28:02,298 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,300 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T18:28:02,302 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T18:28:02,362 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase.id] with ID: eee4397a-1617-4704-ab09-2adfde728aa1 2024-11-19T18:28:02,362 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/.tmp/hbase.id 2024-11-19T18:28:02,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:28:02,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:28:02,375 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/.tmp/hbase.id]:[hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase.id] 2024-11-19T18:28:02,417 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:02,422 INFO [master/30db5f576be8:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:28:02,441 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-19T18:28:02,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:28:02,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:28:02,477 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:28:02,479 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:28:02,485 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:28:02,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:28:02,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:28:02,537 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store 2024-11-19T18:28:02,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:28:02,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:28:02,562 INFO [master/30db5f576be8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T18:28:02,565 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:02,566 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:28:02,566 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:28:02,566 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:28:02,568 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:28:02,568 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:28:02,568 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:28:02,569 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732040882566Disabling compacts and flushes for region at 1732040882566Disabling writes for close at 1732040882568 (+2 ms)Writing region close event to WAL at 1732040882568Closed at 1732040882568 2024-11-19T18:28:02,571 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/.initializing 2024-11-19T18:28:02,571 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/WALs/30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,592 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C46305%2C1732040881439, suffix=, logDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/WALs/30db5f576be8,46305,1732040881439, archiveDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/oldWALs, maxLogs=10 2024-11-19T18:28:02,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C46305%2C1732040881439.1732040882597 2024-11-19T18:28:02,619 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/WALs/30db5f576be8,46305,1732040881439/30db5f576be8%2C46305%2C1732040881439.1732040882597 2024-11-19T18:28:02,628 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39983:39983),(127.0.0.1/127.0.0.1:33663:33663)] 2024-11-19T18:28:02,629 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:28:02,630 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:02,633 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,634 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:28:02,706 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:02,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:02,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:28:02,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:02,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:28:02,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:28:02,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:02,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:28:02,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:28:02,722 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:02,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:28:02,723 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,726 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,727 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,732 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,733 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,736 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:28:02,739 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:28:02,744 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:28:02,745 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703887, jitterRate=-0.10496188700199127}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:28:02,751 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732040882647Initializing all the Stores at 1732040882649 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040882650 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040882650Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040882651 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040882651Cleaning up temporary data from old regions at 1732040882733 (+82 ms)Region opened successfully at 1732040882751 (+18 ms) 2024-11-19T18:28:02,752 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:28:02,787 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7747da44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:28:02,822 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:28:02,834 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:28:02,835 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:28:02,838 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:28:02,840 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T18:28:02,845 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-19T18:28:02,845 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:28:02,873 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:28:02,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:28:02,885 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:28:02,888 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:28:02,890 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:28:02,892 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:28:02,894 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:28:02,898 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:28:02,901 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:28:02,902 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:28:02,904 
DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:28:02,923 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:28:02,924 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:28:02,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:28:02,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:28:02,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,931 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,46305,1732040881439, sessionid=0x101317c5c810000, setting cluster-up flag (Was=false) 2024-11-19T18:28:02,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,952 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:28:02,954 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:02,967 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:28:02,969 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,46305,1732040881439 2024-11-19T18:28:02,975 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:28:03,046 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(746): ClusterId : eee4397a-1617-4704-ab09-2adfde728aa1 2024-11-19T18:28:03,049 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:28:03,053 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:28:03,055 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:28:03,055 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:28:03,058 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:28:03,059 DEBUG [RS:0;30db5f576be8:42803 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60e6fac2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:28:03,064 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:28:03,071 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:28:03,074 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:42803 2024-11-19T18:28:03,077 INFO [RS:0;30db5f576be8:42803 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:28:03,077 INFO [RS:0;30db5f576be8:42803 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:28:03,077 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(832): About to register with Master. 
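The interleaved master ('becomeActiveMaster') and region-server ('RS:0;30db5f576be8:42803') threads running in a single JVM are characteristic of an HBase mini-cluster test. A minimal sketch of driving such a cluster, assuming the hbase-testing-util module (HBaseTestingUtility; renamed HBaseTestingUtil in newer 3.x code lines), looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseTestingUtility util = new HBaseTestingUtility(conf);
        // Starts an in-process HDFS, ZooKeeper quorum, HMaster and one HRegionServer,
        // producing startup output much like the log lines in this section.
        util.startMiniCluster(1);
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }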
2024-11-19T18:28:03,079 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,46305,1732040881439 with port=42803, startcode=1732040882153 2024-11-19T18:28:03,079 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,46305,1732040881439 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:28:03,088 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:28:03,088 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:28:03,088 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:28:03,089 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:28:03,089 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:28:03,089 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,089 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:28:03,090 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,090 DEBUG [RS:0;30db5f576be8:42803 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:28:03,093 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732040913093 2024-11-19T18:28:03,094 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:28:03,095 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:28:03,095 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:28:03,095 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:28:03,099 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:28:03,099 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:28:03,100 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:28:03,100 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:28:03,100 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,102 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,102 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:28:03,104 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:28:03,105 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:28:03,106 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:28:03,108 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:28:03,109 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:28:03,110 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040883110,5,FailOnTimeoutGroup] 2024-11-19T18:28:03,112 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040883111,5,FailOnTimeoutGroup] 2024-11-19T18:28:03,112 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,113 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:28:03,114 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,114 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:28:03,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:28:03,122 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:28:03,123 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, regionDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198 2024-11-19T18:28:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:28:03,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:28:03,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:03,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:28:03,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:28:03,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:28:03,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:28:03,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,153 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:28:03,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:28:03,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:28:03,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:28:03,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:28:03,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered 
edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740 2024-11-19T18:28:03,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740 2024-11-19T18:28:03,166 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51565, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:28:03,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:28:03,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:28:03,169 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:28:03,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:28:03,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46305 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,177 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46305 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,177 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:28:03,178 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880618, jitterRate=0.11976437270641327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:28:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732040883142Initializing all the Stores at 1732040883144 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883144Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883144Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040883144Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883144Cleaning up temporary data from old regions at 1732040883168 (+24 ms)Region opened successfully at 1732040883181 (+13 ms) 2024-11-19T18:28:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:28:03,181 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:28:03,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:28:03,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:28:03,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:28:03,183 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:28:03,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732040883181Disabling compacts and flushes for region at 1732040883181Disabling writes for close at 1732040883182 (+1 ms)Writing region close event to WAL at 1732040883183 (+1 ms)Closed at 1732040883183 2024-11-19T18:28:03,187 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:28:03,187 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:28:03,193 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198 2024-11-19T18:28:03,193 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34103 2024-11-19T18:28:03,193 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:28:03,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:28:03,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:28:03,200 DEBUG [RS:0;30db5f576be8:42803 {}] zookeeper.ZKUtil(111): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,200 WARN [RS:0;30db5f576be8:42803 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
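The region server's ephemeral registration znode under /hbase/rs, created above, can be listed directly with the plain ZooKeeper client. A small sketch, using the quorum address 127.0.0.1:64665 and base znode /hbase reported in the log; the session timeout and the rest are assumptions for illustration:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZNodes {
      public static void main(String[] args) throws Exception {
        // Quorum address as reported in the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64665", 30_000, event -> { });
        try {
          // Each live region server registers an ephemeral child under /hbase/rs,
          // e.g. "30db5f576be8,42803,1732040882153".
          List<String> servers = zk.getChildren("/hbase/rs", false);
          servers.forEach(System.out::println);
        } finally {
          zk.close();
        }
      }
    }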
2024-11-19T18:28:03,200 INFO [RS:0;30db5f576be8:42803 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:28:03,200 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,202 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,42803,1732040882153] 2024-11-19T18:28:03,202 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:28:03,205 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:28:03,229 INFO [RS:0;30db5f576be8:42803 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:28:03,242 INFO [RS:0;30db5f576be8:42803 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:28:03,247 INFO [RS:0;30db5f576be8:42803 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:28:03,247 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,248 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:28:03,254 INFO [RS:0;30db5f576be8:42803 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:28:03,255 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
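The resolved values above (global memstore limit 880 M with an 836 M lower mark, compaction throughput bounded between 50 and 100 MB/second) correspond to standard configuration keys. A sketch of setting them explicitly follows; the key names are the usual hbase-default ones and should be verified against the release in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore limit as a fraction of the region-server heap; the log above
        // reports the resolved upper mark of 880 M.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Lower watermark as a fraction of the upper limit (0.95 * 880 M ~= 836 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower above).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }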
2024-11-19T18:28:03,255 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:28:03,256 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:28:03,257 DEBUG [RS:0;30db5f576be8:42803 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:28:03,258 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,259 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,259 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,259 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
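Each 'Starting executor service name=..., corePoolSize=N, maxPoolSize=N' entry above is a named, bounded worker pool inside the region server. As a rough JDK-only analogy (not the org.apache.hadoop.hbase.executor classes themselves), a pool with core and max size 1, such as RS_OPEN_REGION, behaves like:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedPoolSketch {
      // Rough JDK equivalent of corePoolSize=1, maxPoolSize=1: a single worker
      // thread draining an unbounded queue of event handlers.
      public static ThreadPoolExecutor openRegionPool() {
        return new ThreadPoolExecutor(
            1, 1,                        // core and max pool size
            60L, TimeUnit.SECONDS,       // idle keep-alive
            new LinkedBlockingQueue<>()  // unbounded work queue
        );
      }
    }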
2024-11-19T18:28:03,259 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,259 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,42803,1732040882153-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:28:03,279 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:28:03,281 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,42803,1732040882153-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,281 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,282 INFO [RS:0;30db5f576be8:42803 {}] regionserver.Replication(171): 30db5f576be8,42803,1732040882153 started 2024-11-19T18:28:03,300 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:03,301 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,42803,1732040882153, RpcServer on 30db5f576be8/172.17.0.2:42803, sessionid=0x101317c5c810001 2024-11-19T18:28:03,302 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:28:03,302 DEBUG [RS:0;30db5f576be8:42803 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,302 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,42803,1732040882153' 2024-11-19T18:28:03,302 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:28:03,303 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:28:03,304 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:28:03,304 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:28:03,304 DEBUG [RS:0;30db5f576be8:42803 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,304 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,42803,1732040882153' 2024-11-19T18:28:03,304 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:28:03,305 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:28:03,305 DEBUG [RS:0;30db5f576be8:42803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:28:03,305 INFO [RS:0;30db5f576be8:42803 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:28:03,305 INFO [RS:0;30db5f576be8:42803 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-19T18:28:03,356 WARN [30db5f576be8:46305 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:28:03,414 INFO [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C42803%2C1732040882153, suffix=, logDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153, archiveDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs, maxLogs=32 2024-11-19T18:28:03,417 INFO [RS:0;30db5f576be8:42803 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040883416 2024-11-19T18:28:03,426 INFO [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040883416 2024-11-19T18:28:03,427 DEBUG [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:28:03,608 DEBUG [30db5f576be8:46305 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:28:03,621 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,628 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,42803,1732040882153, state=OPENING 2024-11-19T18:28:03,633 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:28:03,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:03,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:28:03,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:28:03,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:28:03,637 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:28:03,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,42803,1732040882153}] 2024-11-19T18:28:03,818 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:28:03,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39243, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:28:03,832 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:28:03,832 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:28:03,836 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C42803%2C1732040882153.meta, suffix=.meta, logDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153, archiveDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs, maxLogs=32 2024-11-19T18:28:03,838 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.meta.1732040883838.meta 2024-11-19T18:28:03,846 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.meta.1732040883838.meta 2024-11-19T18:28:03,848 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:28:03,849 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:28:03,850 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:28:03,853 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:28:03,859 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
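The two wal.AbstractFSWAL(613) "WAL configuration" entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) and the FSHLogProvider instantiation map onto standard HBase configuration keys. A minimal sketch of those keys, with the values copied from the log; the surrounding setup code is an assumption for illustration and is not taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: standard keys behind the logged WAL settings.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                          // -> FSHLogProvider, as logged
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize = 256 MB
    conf.set("hbase.regionserver.logroll.multiplier", "0.5");              // rollsize = 0.5 * blocksize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs = 32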
2024-11-19T18:28:03,864 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:28:03,864 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:03,864 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:28:03,865 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:28:03,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:28:03,869 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:28:03,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:28:03,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:28:03,872 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:28:03,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:28:03,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:28:03,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:28:03,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:28:03,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:03,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
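The StoreOpener entries above open the four column families of hbase:meta (info, ns, rep_barrier, table). The same family settings can be read back through the public client API; a hedged fragment, assuming an existing Configuration named conf:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    // Fragment: read back the hbase:meta families whose stores are opened above.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        System.out.println(cf.getNameAsString()
            + " blocksize=" + cf.getBlocksize()
            + " encoding=" + cf.getDataBlockEncoding());
      }
    }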
2024-11-19T18:28:03,878 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:28:03,879 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740 2024-11-19T18:28:03,881 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740 2024-11-19T18:28:03,883 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:28:03,883 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:28:03,884 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:28:03,887 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:28:03,888 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827188, jitterRate=0.051824599504470825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:28:03,888 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:28:03,890 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732040883865Writing region info on filesystem at 1732040883865Initializing all the Stores at 1732040883867 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883867Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883867Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040883867Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040883867Cleaning up temporary data from old regions at 1732040883883 (+16 ms)Running coprocessor post-open hooks at 1732040883889 (+6 ms)Region opened successfully at 1732040883890 (+1 ms) 2024-11-19T18:28:03,897 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732040883808 2024-11-19T18:28:03,909 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:28:03,909 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:28:03,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,913 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,42803,1732040882153, state=OPEN 2024-11-19T18:28:03,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:28:03,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:28:03,921 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:28:03,921 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:28:03,921 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,42803,1732040882153 2024-11-19T18:28:03,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:28:03,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,42803,1732040882153 in 282 msec 2024-11-19T18:28:03,933 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:28:03,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 736 msec 2024-11-19T18:28:03,935 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:28:03,935 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:28:03,955 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:28:03,957 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,42803,1732040882153, seqNum=-1] 2024-11-19T18:28:03,981 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:28:03,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60331, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:28:04,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 997 msec 2024-11-19T18:28:04,002 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732040884002, completionTime=-1 2024-11-19T18:28:04,005 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:28:04,005 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:28:04,033 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:28:04,033 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732040944033 2024-11-19T18:28:04,033 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041004033 2024-11-19T18:28:04,033 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-19T18:28:04,036 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:04,036 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:04,036 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:04,038 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:46305, period=300000, unit=MILLISECONDS is enabled. 
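Each hbase.ChoreService(168) entry above ("Chore ScheduledChore name=..., period=..., unit=... is enabled.") corresponds to a ScheduledChore being handed to a ChoreService. A minimal, hedged sketch of that pattern; the chore name, period, and stopper below are illustrative, and the exact constructor overloads should be checked against the HBase version in use:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Sketch: scheduleChore() is what emits the
    // "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("example");              // thread-name prefix only
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {  // period in milliseconds
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    choreService.scheduleChore(chore);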
2024-11-19T18:28:04,038 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:04,039 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:28:04,044 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:28:04,065 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.767sec 2024-11-19T18:28:04,066 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:28:04,067 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:28:04,068 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:28:04,069 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T18:28:04,069 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:28:04,070 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:28:04,070 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:28:04,079 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:28:04,080 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:28:04,080 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46305,1732040881439-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
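With the master reporting "completed initialization" above, the 'default' and 'hbase' namespaces created earlier by InitMetaProcedure are visible to clients. A small fragment, assuming an existing Admin handle named admin:

    import org.apache.hadoop.hbase.NamespaceDescriptor;

    // Fragment: both built-in namespaces created by InitMetaProcedure should be listed.
    for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
      System.out.println(ns.getName());   // expected: "default" and "hbase"
    }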
2024-11-19T18:28:04,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48e858bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:28:04,158 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T18:28:04,158 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T18:28:04,162 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,46305,-1 for getting cluster id 2024-11-19T18:28:04,164 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:28:04,173 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'eee4397a-1617-4704-ab09-2adfde728aa1' 2024-11-19T18:28:04,176 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:28:04,176 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "eee4397a-1617-4704-ab09-2adfde728aa1" 2024-11-19T18:28:04,178 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b860e5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:28:04,179 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,46305,-1] 2024-11-19T18:28:04,181 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:28:04,183 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:28:04,185 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38226, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:28:04,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b20e2b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:28:04,188 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:28:04,195 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,42803,1732040882153, seqNum=-1] 2024-11-19T18:28:04,196 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:28:04,199 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51060, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:28:04,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=30db5f576be8,46305,1732040881439 2024-11-19T18:28:04,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:28:04,229 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:28:04,233 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T18:28:04,238 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 30db5f576be8,46305,1732040881439 2024-11-19T18:28:04,241 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5fd5bef9 2024-11-19T18:28:04,242 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T18:28:04,245 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38242, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T18:28:04,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T18:28:04,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
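The "Minicluster is up" and "set balanceSwitch=false" entries above are the test-harness side of this sequence. A hedged sketch of that setup, assuming HBaseTestingUtil and Admin expose the usual test helpers (method names not verified against this exact 3.0.0-beta-2-SNAPSHOT build):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    // Sketch: bring up a single-node mini cluster and disable the balancer,
    // mirroring the "Minicluster is up" and "set balanceSwitch=false" entries above.
    HBaseTestingUtil util = new HBaseTestingUtil();   // wraps HDFS + ZK + master + regionserver for tests
    util.startMiniCluster();                          // returns once the cluster reports up
    Admin admin = util.getAdmin();
    admin.balancerSwitch(false, true);                // turn the balancer off synchronously, as logged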
2024-11-19T18:28:04,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:28:04,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-19T18:28:04,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T18:28:04,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-19T18:28:04,264 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:04,267 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T18:28:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:28:04,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741835_1011 (size=389) 2024-11-19T18:28:04,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741835_1011 (size=389) 2024-11-19T18:28:04,308 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1ff6ae4e1d66794cb849e981de96ff68, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198 2024-11-19T18:28:04,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741836_1012 (size=72) 2024-11-19T18:28:04,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741836_1012 (size=72) 2024-11-19T18:28:04,319 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:04,319 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1ff6ae4e1d66794cb849e981de96ff68, disabling compactions & flushes 2024-11-19T18:28:04,319 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,319 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,319 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. after waiting 0 ms 2024-11-19T18:28:04,319 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,319 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,319 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1ff6ae4e1d66794cb849e981de96ff68: Waiting for close lock at 1732040884319Disabling compacts and flushes for region at 1732040884319Disabling writes for close at 1732040884319Writing region close event to WAL at 1732040884319Closed at 1732040884319 2024-11-19T18:28:04,322 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T18:28:04,326 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732040884322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732040884322"}]},"ts":"1732040884322"} 2024-11-19T18:28:04,332 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
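The HMaster$4(2454) create request above carries a descriptor with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW') plus the small MAX_FILESIZE/MEMSTORE_FLUSHSIZE values that triggered the earlier TableDescriptorChecker warnings. A hedged client-side sketch that would produce an equivalent request, assuming an existing Admin named admin; whether the test sets these sizes on the descriptor or via the corresponding configuration keys is not shown in the log, so the descriptor form here is illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a descriptor matching the logged create request.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
        .setMaxFileSize(786432L)        // warned above: MAX_FILESIZE (786432) is too small
        .setMemStoreFlushSize(8192L)    // warned above: MEMSTORE_FLUSHSIZE (8192) is too small
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .build())
        .build();
    admin.createTable(td);   // handled on the master as CreateTableProcedure pid=4 above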
2024-11-19T18:28:04,335 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T18:28:04,337 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732040884335"}]},"ts":"1732040884335"} 2024-11-19T18:28:04,342 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-19T18:28:04,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68, ASSIGN}] 2024-11-19T18:28:04,347 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68, ASSIGN 2024-11-19T18:28:04,349 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68, ASSIGN; state=OFFLINE, location=30db5f576be8,42803,1732040882153; forceNewPlan=false, retain=false 2024-11-19T18:28:04,501 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ff6ae4e1d66794cb849e981de96ff68, regionState=OPENING, regionLocation=30db5f576be8,42803,1732040882153 2024-11-19T18:28:04,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68, ASSIGN because future has completed 2024-11-19T18:28:04,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ff6ae4e1d66794cb849e981de96ff68, server=30db5f576be8,42803,1732040882153}] 2024-11-19T18:28:04,668 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 
2024-11-19T18:28:04,669 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ff6ae4e1d66794cb849e981de96ff68, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:28:04,669 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,669 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:28:04,669 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,670 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,672 INFO [StoreOpener-1ff6ae4e1d66794cb849e981de96ff68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,675 INFO [StoreOpener-1ff6ae4e1d66794cb849e981de96ff68-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ff6ae4e1d66794cb849e981de96ff68 columnFamilyName info 2024-11-19T18:28:04,675 DEBUG [StoreOpener-1ff6ae4e1d66794cb849e981de96ff68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:28:04,677 INFO [StoreOpener-1ff6ae4e1d66794cb849e981de96ff68-1 {}] regionserver.HStore(327): Store=1ff6ae4e1d66794cb849e981de96ff68/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:28:04,677 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,678 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,679 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,680 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,680 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,683 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,686 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:28:04,687 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1ff6ae4e1d66794cb849e981de96ff68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775140, jitterRate=-0.014359712600708008}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:28:04,687 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:04,688 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1ff6ae4e1d66794cb849e981de96ff68: Running coprocessor pre-open hook at 1732040884670Writing region info on filesystem at 1732040884670Initializing all the Stores at 1732040884672 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040884672Cleaning up temporary data from old regions at 1732040884680 (+8 ms)Running coprocessor post-open hooks at 1732040884687 (+7 ms)Region opened successfully at 1732040884688 (+1 ms) 2024-11-19T18:28:04,690 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68., pid=6, masterSystemTime=1732040884661 2024-11-19T18:28:04,694 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,694 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:28:04,696 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ff6ae4e1d66794cb849e981de96ff68, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,42803,1732040882153 2024-11-19T18:28:04,697 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46305 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=30db5f576be8,42803,1732040882153, table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-19T18:28:04,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ff6ae4e1d66794cb849e981de96ff68, server=30db5f576be8,42803,1732040882153 because future has completed 2024-11-19T18:28:04,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T18:28:04,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ff6ae4e1d66794cb849e981de96ff68, server=30db5f576be8,42803,1732040882153 in 196 msec 2024-11-19T18:28:04,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T18:28:04,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ff6ae4e1d66794cb849e981de96ff68, ASSIGN in 362 msec 2024-11-19T18:28:04,713 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T18:28:04,714 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732040884713"}]},"ts":"1732040884713"} 2024-11-19T18:28:04,718 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-19T18:28:04,720 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T18:28:04,724 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 467 msec 2024-11-19T18:28:09,368 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T18:28:09,421 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T18:28:09,422 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-19T18:28:11,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:28:11,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T18:28:11,863 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T18:28:11,863 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T18:28:11,864 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:28:11,864 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T18:28:11,864 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T18:28:11,864 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T18:28:14,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46305 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:28:14,293 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-19T18:28:14,296 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-19T18:28:14,301 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-19T18:28:14,302 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 
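The HBaseTestingUtil(2234/2240) entries above locate the single region of the new table; the client-side equivalent uses a RegionLocator, which is also what produces the "fetched location" entry that follows for row0001. A hedged fragment, assuming an open Connection named conn:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    // Fragment: locate the region that will receive row0001.
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }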
2024-11-19T18:28:14,303 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040894303 2024-11-19T18:28:14,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:14,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:14,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:14,312 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:14,312 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:14,312 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040883416 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040894303 2024-11-19T18:28:14,313 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:28:14,314 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040883416 is not closed yet, will try archiving it next time 2024-11-19T18:28:14,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741833_1009 (size=451) 2024-11-19T18:28:14,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741833_1009 (size=451) 2024-11-19T18:28:14,317 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040883416 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040883416 2024-11-19T18:28:14,323 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68., hostname=30db5f576be8,42803,1732040882153, seqNum=2] 2024-11-19T18:28:26,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42803 {}] regionserver.HRegion(8855): Flush requested on 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:26,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ff6ae4e1d66794cb849e981de96ff68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:28:26,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/96d81efc6c3f4fb38082fab98fabec5c is 1080, key is row0001/info:/1732040894326/Put/seqid=0 2024-11-19T18:28:26,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741838_1014 (size=12509) 2024-11-19T18:28:26,431 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741838_1014 (size=12509) 2024-11-19T18:28:26,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/96d81efc6c3f4fb38082fab98fabec5c 2024-11-19T18:28:26,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/96d81efc6c3f4fb38082fab98fabec5c as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c 2024-11-19T18:28:26,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T18:28:26,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 135ms, sequenceid=11, compaction requested=false 2024-11-19T18:28:26,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ff6ae4e1d66794cb849e981de96ff68: 2024-11-19T18:28:30,564 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
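The flush above moves a ~7.36 KB memstore to disk in two steps: DefaultStoreFlusher writes an HFile under the region's .tmp directory, then HRegionFileSystem commits it into the info store, after which the region reports "Finished flush ... sequenceid=11". Roughly the same flush can be provoked from a client; a helper to drop into the previous sketch's class, given its open Connection (row-key format and empty qualifier are illustrative choices):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static void writeSevenRowsAndFlush(Connection conn) throws Exception {
      TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
      try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
        byte[] value = new byte[1024];          // ~1 KB per cell, 7 cells ~= 7.36 KB
        for (int i = 1; i <= 7; i++) {
          Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
          table.put(put);
        }
        // Ask the region server to flush: it writes the .tmp HFile and then
        // commits it into .../info/, the same sequence logged above.
        admin.flush(tn);
      }
    }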
2024-11-19T18:28:34,370 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040914370 2024-11-19T18:28:34,578 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:34,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:34,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:34,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:34,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:34,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:34,579 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040894303 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040914370 2024-11-19T18:28:34,580 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:28:34,581 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040894303 is not closed yet, will try archiving it next time 2024-11-19T18:28:34,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741837_1013 (size=12399) 2024-11-19T18:28:34,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741837_1013 (size=12399) 2024-11-19T18:28:34,784 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:36,988 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:39,192 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:41,397 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:41,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42803 {}] regionserver.HRegion(8855): Flush requested on 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:28:41,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ff6ae4e1d66794cb849e981de96ff68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:28:41,599 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:41,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/00600c8a70fd48e5b27359775ba04536 is 1080, key is row0008/info:/1732040908359/Put/seqid=0 2024-11-19T18:28:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741840_1016 (size=12509) 2024-11-19T18:28:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741840_1016 (size=12509) 2024-11-19T18:28:41,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/00600c8a70fd48e5b27359775ba04536 2024-11-19T18:28:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/00600c8a70fd48e5b27359775ba04536 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536 2024-11-19T18:28:41,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536, entries=7, sequenceid=21, filesize=12.2 K 2024-11-19T18:28:41,837 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:41,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 
440ms, sequenceid=21, compaction requested=false 2024-11-19T18:28:41,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ff6ae4e1d66794cb849e981de96ff68: 2024-11-19T18:28:41,837 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-19T18:28:41,837 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:28:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c because midkey is the same as first or last row 2024-11-19T18:28:43,602 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:44,159 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T18:28:44,159 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T18:28:45,807 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:45,809 WARN [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:45,809 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C42803%2C1732040882153:(num 1732040914370) roll requested 2024-11-19T18:28:45,810 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040925810 2024-11-19T18:28:46,018 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:46,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:46,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:46,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:46,019 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:28:46,019 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
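The WARN at AbstractFSWAL(2201) above requests a roll because eight syncs in the current window exceeded the slow-sync latency while the configured count threshold is five; the logRoller thread then opens the next WAL file. A self-contained toy sketch of that counting policy (purely illustrative; the class, field names, and numbers below are not HBase internals):

    /** Toy model of "roll the WAL after too many slow syncs in one window". */
    final class SlowSyncRollTracker {
      private final long slowSyncMs;    // a sync slower than this counts as "slow"
      private final int rollThreshold;  // slow syncs tolerated before a roll request
      private int slowSyncCount;

      SlowSyncRollTracker(long slowSyncMs, int rollThreshold) {
        this.slowSyncMs = slowSyncMs;
        this.rollThreshold = rollThreshold;
      }

      /** Record one sync; returns true when the caller should request a log roll. */
      synchronized boolean onSync(long syncCostMs) {
        if (syncCostMs > slowSyncMs) {
          slowSyncCount++;
        }
        return slowSyncCount > rollThreshold;
      }

      /** Reset once the WAL has actually been rolled. */
      synchronized void rolled() {
        slowSyncCount = 0;
      }
    }

With rollThreshold=5 the sixth slow sync starts returning true; the "count=8, threshold=5" in the WARN just means more slow syncs landed before the roller performed the roll. A second, independent trigger appears further down in this log: a single sync costing more than the 5000 ms roll-on-sync threshold (the AbstractFSWAL(1374) WARNs with time=5006 ms, threshold=5000 ms).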
2024-11-19T18:28:46,020 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040914370 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040925810 2024-11-19T18:28:46,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741839_1015 (size=7739) 2024-11-19T18:28:46,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741839_1015 (size=7739) 2024-11-19T18:28:46,027 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:28:46,027 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040914370 is not closed yet, will try archiving it next time 2024-11-19T18:28:46,028 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040894303 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040894303 2024-11-19T18:28:48,011 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:49,670 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1ff6ae4e1d66794cb849e981de96ff68, had cached 0 bytes from a total of 25018 2024-11-19T18:28:50,216 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:52,420 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:54,624 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:28:56,627 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T18:28:56,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040936627 2024-11-19T18:29:00,565 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T18:29:01,637 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:01,638 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:01,638 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C42803%2C1732040882153:(num 1732040936627) roll requested 2024-11-19T18:29:01,639 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:01,639 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:01,639 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:01,639 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:01,639 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:01,639 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040925810 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040936627 2024-11-19T18:29:01,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741841_1017 (size=4753) 2024-11-19T18:29:01,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741841_1017 (size=4753) 2024-11-19T18:29:01,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:29:01,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040925810 is not closed yet, will try archiving it next time 2024-11-19T18:29:01,649 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040941648 2024-11-19T18:29:06,652 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:06,652 WARN [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42803 {}] regionserver.HRegion(8855): Flush requested on 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:29:06,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ff6ae4e1d66794cb849e981de96ff68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:29:06,666 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5014 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:06,667 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5014 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:08,654 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T18:29:11,655 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:11,655 WARN [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK], DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK]] 2024-11-19T18:29:11,656 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:11,656 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:11,656 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:11,656 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:11,656 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:11,657 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040936627 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040941648 2024-11-19T18:29:11,658 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39983:39983),(127.0.0.1/127.0.0.1:33663:33663)] 2024-11-19T18:29:11,658 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040936627 is not closed yet, will try archiving it next time 2024-11-19T18:29:11,658 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C42803%2C1732040882153:(num 1732040941648) roll requested 2024-11-19T18:29:11,659 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040951658 2024-11-19T18:29:11,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741842_1018 (size=1569) 2024-11-19T18:29:11,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741842_1018 (size=1569) 2024-11-19T18:29:11,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/7f3fa9d81b0b4c36a34baf108fba8804 is 1080, key is row0015/info:/1732040923400/Put/seqid=0 2024-11-19T18:29:11,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741844_1020 (size=12509) 2024-11-19T18:29:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741844_1020 (size=12509) 2024-11-19T18:29:11,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/7f3fa9d81b0b4c36a34baf108fba8804 2024-11-19T18:29:11,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/7f3fa9d81b0b4c36a34baf108fba8804 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804 2024-11-19T18:29:11,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804, entries=7, sequenceid=31, filesize=12.2 K 2024-11-19T18:29:16,666 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK], DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK]] 2024-11-19T18:29:16,667 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK], DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK]] 2024-11-19T18:29:16,697 INFO [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK], DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK]] 2024-11-19T18:29:16,697 WARN [FSHLog-0-hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198-prefix:30db5f576be8,42803,1732040882153 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45723,DS-b3fa2fd3-f3d3-4afa-8733-d05de0470350,DISK], DatanodeInfoWithStorage[127.0.0.1:34527,DS-255e42af-274e-4796-be06-863aaad1d285,DISK]] 2024-11-19T18:29:16,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 10045ms, sequenceid=31, compaction requested=true 2024-11-19T18:29:16,697 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ff6ae4e1d66794cb849e981de96ff68: 2024-11-19T18:29:16,698 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,698 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-19T18:29:16,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,698 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:29:16,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,698 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c because midkey is the same as first or last row 2024-11-19T18:29:16,698 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040941648 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040951658 2024-11-19T18:29:16,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ff6ae4e1d66794cb849e981de96ff68:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-19T18:29:16,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741843_1019 (size=438) 2024-11-19T18:29:16,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741843_1019 (size=438) 2024-11-19T18:29:16,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:29:16,705 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:29:16,705 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C42803%2C1732040882153:(num 1732040951658) roll requested 2024-11-19T18:29:16,705 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040914370 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040914370 2024-11-19T18:29:16,705 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:29:16,705 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040956705 2024-11-19T18:29:16,707 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040925810 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040925810 2024-11-19T18:29:16,709 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:29:16,709 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040936627 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040936627 2024-11-19T18:29:16,710 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HStore(1541): 1ff6ae4e1d66794cb849e981de96ff68/info is initiating minor compaction (all files) 2024-11-19T18:29:16,711 INFO [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1ff6ae4e1d66794cb849e981de96ff68/info in TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 
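With three ~12.2 K flush files in the info store, the flusher queues a compaction; SortedCompactionPolicy finds 3 eligible files and ExploringCompactionPolicy selects all of them (37527 bytes total), so a minor compaction of the whole store begins. The same work can also be requested explicitly through the Admin API; a hedged fragment, assuming the Admin handle from the first sketch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.util.Bytes;

    static void compactInfoAndWait(Admin admin) throws Exception {
      TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
      // Queue a minor compaction of just the 'info' family, like the flusher did above.
      admin.compact(tn, Bytes.toBytes("info"));
      // Poll until the region server reports no compaction in flight.
      while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(100);   // crude wait, fine for a sketch
      }
    }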
2024-11-19T18:29:16,711 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040941648 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040941648 2024-11-19T18:29:16,711 INFO [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804] into tmpdir=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp, totalSize=36.6 K 2024-11-19T18:29:16,713 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96d81efc6c3f4fb38082fab98fabec5c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732040894326 2024-11-19T18:29:16,714 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00600c8a70fd48e5b27359775ba04536, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732040908359 2024-11-19T18:29:16,715 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f3fa9d81b0b4c36a34baf108fba8804, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732040923400 2024-11-19T18:29:16,735 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,735 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,736 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,736 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,736 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,736 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040951658 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040956705 2024-11-19T18:29:16,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741845_1021 (size=93) 2024-11-19T18:29:16,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741845_1021 (size=93) 2024-11-19T18:29:16,740 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040951658 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs/30db5f576be8%2C42803%2C1732040882153.1732040951658 2024-11-19T18:29:16,740 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:29:16,741 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42803%2C1732040882153.1732040956740 2024-11-19T18:29:16,750 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,750 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,750 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,750 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,750 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:16,751 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040956705 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040956740 2024-11-19T18:29:16,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741846_1022 (size=1258) 2024-11-19T18:29:16,753 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33663:33663),(127.0.0.1/127.0.0.1:39983:39983)] 2024-11-19T18:29:16,753 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/WALs/30db5f576be8,42803,1732040882153/30db5f576be8%2C42803%2C1732040882153.1732040956705 is not closed yet, will try archiving it next time 2024-11-19T18:29:16,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741846_1022 (size=1258) 2024-11-19T18:29:16,758 INFO [RS:0;30db5f576be8:42803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ff6ae4e1d66794cb849e981de96ff68#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:29:16,760 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/f1bfadff3e134ab6afec30b8e997d87a is 1080, key is row0001/info:/1732040894326/Put/seqid=0 2024-11-19T18:29:16,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741848_1024 (size=27710) 2024-11-19T18:29:16,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741848_1024 (size=27710) 2024-11-19T18:29:16,794 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/f1bfadff3e134ab6afec30b8e997d87a as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/f1bfadff3e134ab6afec30b8e997d87a 2024-11-19T18:29:16,817 INFO [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1ff6ae4e1d66794cb849e981de96ff68/info of 1ff6ae4e1d66794cb849e981de96ff68 into f1bfadff3e134ab6afec30b8e997d87a(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T18:29:16,817 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1ff6ae4e1d66794cb849e981de96ff68: 2024-11-19T18:29:16,819 INFO [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68., storeName=1ff6ae4e1d66794cb849e981de96ff68/info, priority=13, startTime=1732040956699; duration=0sec 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/f1bfadff3e134ab6afec30b8e997d87a because midkey is the same as first or last row 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:29:16,820 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/f1bfadff3e134ab6afec30b8e997d87a because midkey is the same as first or last row 2024-11-19T18:29:16,821 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T18:29:16,821 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:29:16,821 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/f1bfadff3e134ab6afec30b8e997d87a because midkey is the same as first or last row 2024-11-19T18:29:16,821 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:29:16,821 DEBUG [RS:0;30db5f576be8:42803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ff6ae4e1d66794cb849e981de96ff68:info 2024-11-19T18:29:28,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42803 {}] regionserver.HRegion(8855): Flush requested on 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:29:28,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ff6ae4e1d66794cb849e981de96ff68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:29:28,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/283f112561a34ea9ac6719489b0c7eb2 is 1080, key is row0022/info:/1732040956742/Put/seqid=0 2024-11-19T18:29:28,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741849_1025 (size=12509) 2024-11-19T18:29:28,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741849_1025 (size=12509) 2024-11-19T18:29:28,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/283f112561a34ea9ac6719489b0c7eb2 2024-11-19T18:29:28,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/283f112561a34ea9ac6719489b0c7eb2 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/283f112561a34ea9ac6719489b0c7eb2 2024-11-19T18:29:28,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/283f112561a34ea9ac6719489b0c7eb2, entries=7, sequenceid=42, filesize=12.2 K 2024-11-19T18:29:28,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 46ms, sequenceid=42, compaction requested=false 2024-11-19T18:29:28,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ff6ae4e1d66794cb849e981de96ff68: 2024-11-19T18:29:28,813 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-19T18:29:28,813 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:29:28,813 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/f1bfadff3e134ab6afec30b8e997d87a because midkey is the same as first or last row 2024-11-19T18:29:30,565 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T18:29:34,670 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1ff6ae4e1d66794cb849e981de96ff68, had cached 0 bytes from a total of 40219 2024-11-19T18:29:36,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:29:36,780 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
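After each flush the split policy runs: the size check passes (sumSize=39.3 K against sizeToCheck=16.0 K), but StoreUtils then declines to pick a split point because the dominant store file's midkey equals its first or last row, so the region stays whole. The 16.0 K figure is consistent with an increasing-to-upper-bound style rule in which the threshold grows with the cube of the table's region count, seeded from the small flush size this test appears to configure; the helper below illustrates that arithmetic only, with assumed names and an assumed 2x-flush-size seed, and is not HBase's actual code:

    // Illustrative only: a size-to-check rule that matches the numbers in the DEBUG
    // lines above (one region, tiny flush size -> 16 KB threshold), capped by the
    // region max file size. The seed and names are assumptions for this sketch.
    static long sizeToCheck(int regionsWithCommonTable, long flushSizeBytes, long maxFileSizeBytes) {
      if (regionsWithCommonTable == 0) {
        return maxFileSizeBytes;
      }
      long seed = 2 * flushSizeBytes;
      long grown = seed * regionsWithCommonTable * regionsWithCommonTable
          * regionsWithCommonTable;                    // grows with regionCount^3
      return Math.min(maxFileSizeBytes, grown);
    }
    // Example: sizeToCheck(1, 8 * 1024, 10L << 30) == 16 * 1024, i.e. the 16.0 K above.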
2024-11-19T18:29:36,781 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:36,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:36,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:36,786 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
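The DEBUG "Call stack" above is AsyncConnectionImpl recording who closed it; read bottom-up it shows the JUnit @After path: AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster -> cleanup/closeConnection. A sketch of what that tearDown amounts to in a test class of this kind (the field name is an assumption; the calls are the ones named in the stack trace):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class MyLogRollingTest {
      // Shared mini-cluster helper, as used by the HBase test logged here.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, then asks the master to shut the
        // cluster down -- the source of the "Shutting down minicluster" INFO and
        // the close-call stacks in this log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }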
2024-11-19T18:29:36,786 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:29:36,786 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2135136293, stopped=false 2024-11-19T18:29:36,787 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,46305,1732040881439 2024-11-19T18:29:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:36,789 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:29:36,789 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:29:36,790 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:36,790 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:36,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:36,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:36,790 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,42803,1732040882153' ***** 2024-11-19T18:29:36,790 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:29:36,791 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:29:36,791 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:29:36,791 INFO [RS:0;30db5f576be8:42803 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:29:36,791 INFO [RS:0;30db5f576be8:42803 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:29:36,791 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:29:36,792 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,42803,1732040882153 2024-11-19T18:29:36,792 INFO [RS:0;30db5f576be8:42803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:29:36,792 INFO [RS:0;30db5f576be8:42803 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:42803. 
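Each component that owns an AsyncConnection logs the same pair on shutdown: an INFO "Connection has been closed by ..." plus a DEBUG call stack (the master's shutdown path above, and the region server's own cluster connection just now). In application code that close is normally just the end of a try-with-resources block; a minimal sketch against the standard async client (the single Get is only there to give the connection something to do):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    static void readOnceAndClose() throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
        conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .get(new Get(Bytes.toBytes("row0001")))
            .join();   // wait for the read to complete
      }   // close() runs here and logs "Connection has been closed by ..."
    }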
2024-11-19T18:29:36,792 DEBUG [RS:0;30db5f576be8:42803 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:36,792 DEBUG [RS:0;30db5f576be8:42803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1ff6ae4e1d66794cb849e981de96ff68, disabling compactions & flushes 2024-11-19T18:29:36,793 INFO [RS:0;30db5f576be8:42803 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:29:36,793 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:29:36,793 INFO [RS:0;30db5f576be8:42803 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:29:36,793 INFO [RS:0;30db5f576be8:42803 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. after waiting 0 ms 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 
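[editor's note] The RS_CLOSE_REGION messages above follow a fixed order: disable compactions and flushes, wait (time-limited) for the close lock, disable updates, then flush whatever is still in the memstore. The sketch below is not HBase's HRegion code; it only mirrors that ordering with a plain ReentrantReadWriteLock so the logged sequence is easier to follow.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class RegionCloseSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean writesDisabled = false;

  // Writers hold the read lock, so close() waits for in-flight updates to drain.
  void put(Runnable edit) {
    if (writesDisabled) {
      throw new IllegalStateException("updates disabled for region");
    }
    closeLock.readLock().lock();
    try {
      edit.run();
    } finally {
      closeLock.readLock().unlock();
    }
  }

  void close(long waitMillis) throws InterruptedException {
    // "Closing <region>, disabling compactions & flushes" happens first in the log;
    // that step is implied here before taking the lock.
    // "Time limited wait for close lock on <region>"
    if (!closeLock.writeLock().tryLock(waitMillis, TimeUnit.MILLISECONDS)) {
      throw new IllegalStateException("could not acquire close lock in time");
    }
    try {
      writesDisabled = true;   // "Updates disabled for region <region>"
      flushRemainingEdits();   // "Flushing ... column families, dataSize=..."
    } finally {
      closeLock.writeLock().unlock();
    }
  }

  private void flushRemainingEdits() {
    // placeholder for snapshotting the memstore and writing it out as an HFile
  }
}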
2024-11-19T18:29:36,793 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:29:36,793 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1ff6ae4e1d66794cb849e981de96ff68 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-19T18:29:36,793 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:29:36,793 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1325): Online Regions={1ff6ae4e1d66794cb849e981de96ff68=TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T18:29:36,793 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:29:36,793 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:29:36,794 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:29:36,794 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:29:36,794 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-19T18:29:36,794 DEBUG [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1ff6ae4e1d66794cb849e981de96ff68 2024-11-19T18:29:36,800 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/d13430686b394428b1c38265ac5012c6 is 1080, key is row0029/info:/1732040970769/Put/seqid=0 2024-11-19T18:29:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741850_1026 (size=8193) 2024-11-19T18:29:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741850_1026 (size=8193) 2024-11-19T18:29:36,812 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/d13430686b394428b1c38265ac5012c6 2024-11-19T18:29:36,821 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/.tmp/info/d13430686b394428b1c38265ac5012c6 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/d13430686b394428b1c38265ac5012c6 2024-11-19T18:29:36,822 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/info/2264b93b91dc4238a55e77deba9e108d is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68./info:regioninfo/1732040884696/Put/seqid=0 2024-11-19T18:29:36,830 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/d13430686b394428b1c38265ac5012c6, entries=3, sequenceid=48, filesize=8.0 K 2024-11-19T18:29:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741851_1027 (size=7016) 2024-11-19T18:29:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741851_1027 (size=7016) 2024-11-19T18:29:36,831 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/info/2264b93b91dc4238a55e77deba9e108d 2024-11-19T18:29:36,833 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 40ms, sequenceid=48, compaction requested=true 2024-11-19T18:29:36,833 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804] to archive 2024-11-19T18:29:36,837 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
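[editor's note] The "Committing <.tmp path> as <store path>" lines above are the second half of a write-then-rename pattern: the flushed HFile is first written under the region's .tmp directory and only then renamed into the column family directory, so readers never see a partially written file. A small sketch of that commit step using the Hadoop FileSystem API follows; the helper and its path arguments are illustrative, not HBase's HRegionFileSystem code.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class TmpCommitSketch {
  // Moves a freshly flushed file from <regionDir>/.tmp/<family>/<file> to
  // <regionDir>/<family>/<file>, which is what "Committing ... as ..." records.
  static void commitFlushedFile(FileSystem fs, Path regionDir, String family, String fileName)
      throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path storeFile = new Path(new Path(regionDir, family), fileName);
    // The rename makes the new HFile visible only after it is fully written.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}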
2024-11-19T18:29:36,840 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/96d81efc6c3f4fb38082fab98fabec5c 2024-11-19T18:29:36,842 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/00600c8a70fd48e5b27359775ba04536 2024-11-19T18:29:36,844 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804 to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/info/7f3fa9d81b0b4c36a34baf108fba8804 2024-11-19T18:29:36,857 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/ns/0e7385dfaa5144f79b3dde6d523d7db2 is 43, key is default/ns:d/1732040883987/Put/seqid=0 2024-11-19T18:29:36,857 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30db5f576be8:46305 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-19T18:29:36,862 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [96d81efc6c3f4fb38082fab98fabec5c=12509, 00600c8a70fd48e5b27359775ba04536=12509, 7f3fa9d81b0b4c36a34baf108fba8804=12509] 2024-11-19T18:29:36,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741852_1028 (size=5153) 2024-11-19T18:29:36,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741852_1028 (size=5153) 2024-11-19T18:29:36,867 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/ns/0e7385dfaa5144f79b3dde6d523d7db2 2024-11-19T18:29:36,869 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/default/TestLogRolling-testSlowSyncLogRolling/1ff6ae4e1d66794cb849e981de96ff68/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-19T18:29:36,873 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 2024-11-19T18:29:36,873 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1ff6ae4e1d66794cb849e981de96ff68: Waiting for close lock at 1732040976792Running coprocessor pre-close hooks at 1732040976792Disabling compacts and flushes for region at 1732040976792Disabling writes for close at 1732040976793 (+1 ms)Obtaining lock to block concurrent updates at 1732040976793Preparing flush snapshotting stores in 1ff6ae4e1d66794cb849e981de96ff68 at 1732040976793Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732040976794 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. at 1732040976795 (+1 ms)Flushing 1ff6ae4e1d66794cb849e981de96ff68/info: creating writer at 1732040976795Flushing 1ff6ae4e1d66794cb849e981de96ff68/info: appending metadata at 1732040976799 (+4 ms)Flushing 1ff6ae4e1d66794cb849e981de96ff68/info: closing flushed file at 1732040976799Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74121f: reopening flushed file at 1732040976820 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1ff6ae4e1d66794cb849e981de96ff68 in 40ms, sequenceid=48, compaction requested=true at 1732040976833 (+13 ms)Writing region close event to WAL at 1732040976863 (+30 ms)Running coprocessor post-close hooks at 1732040976870 (+7 ms)Closed at 1732040976872 (+2 ms) 2024-11-19T18:29:36,873 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732040884246.1ff6ae4e1d66794cb849e981de96ff68. 
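[editor's note] The region close journal above interleaves absolute epoch-millisecond timestamps with "(+N ms)" deltas, where each delta is simply the gap to the previous event. The sketch below (not HBase's journal code) renders an ordered list of events the same way, which is all there is to reading those annotations.

import java.util.LinkedHashMap;
import java.util.Map;

class CloseJournalSketch {
  // Renders "<event> at <ts>" with "(+N ms)" whenever the timestamp advanced,
  // matching the journal format in the log above.
  static String render(LinkedHashMap<String, Long> eventsInOrder) {
    StringBuilder out = new StringBuilder();
    long previous = -1;
    for (Map.Entry<String, Long> event : eventsInOrder.entrySet()) {
      out.append(event.getKey()).append(" at ").append(event.getValue());
      if (previous >= 0 && event.getValue() > previous) {
        out.append(" (+").append(event.getValue() - previous).append(" ms)");
      }
      previous = event.getValue();
      out.append('\n');
    }
    return out.toString();
  }
}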
2024-11-19T18:29:36,897 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/table/d6b0cc1b1ab84c098ee67e277c186662 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732040884713/Put/seqid=0 2024-11-19T18:29:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741853_1029 (size=5396) 2024-11-19T18:29:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741853_1029 (size=5396) 2024-11-19T18:29:36,905 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/table/d6b0cc1b1ab84c098ee67e277c186662 2024-11-19T18:29:36,914 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/info/2264b93b91dc4238a55e77deba9e108d as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/info/2264b93b91dc4238a55e77deba9e108d 2024-11-19T18:29:36,923 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/info/2264b93b91dc4238a55e77deba9e108d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T18:29:36,925 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/ns/0e7385dfaa5144f79b3dde6d523d7db2 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/ns/0e7385dfaa5144f79b3dde6d523d7db2 2024-11-19T18:29:36,935 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/ns/0e7385dfaa5144f79b3dde6d523d7db2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T18:29:36,937 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/.tmp/table/d6b0cc1b1ab84c098ee67e277c186662 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/table/d6b0cc1b1ab84c098ee67e277c186662 2024-11-19T18:29:36,947 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/table/d6b0cc1b1ab84c098ee67e277c186662, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T18:29:36,949 INFO 
[RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-11-19T18:29:36,955 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T18:29:36,956 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:29:36,957 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:36,957 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732040976793Running coprocessor pre-close hooks at 1732040976793Disabling compacts and flushes for region at 1732040976793Disabling writes for close at 1732040976794 (+1 ms)Obtaining lock to block concurrent updates at 1732040976794Preparing flush snapshotting stores in 1588230740 at 1732040976794Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732040976794Flushing stores of hbase:meta,,1.1588230740 at 1732040976795 (+1 ms)Flushing 1588230740/info: creating writer at 1732040976795Flushing 1588230740/info: appending metadata at 1732040976822 (+27 ms)Flushing 1588230740/info: closing flushed file at 1732040976822Flushing 1588230740/ns: creating writer at 1732040976840 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732040976857 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732040976857Flushing 1588230740/table: creating writer at 1732040976876 (+19 ms)Flushing 1588230740/table: appending metadata at 1732040976896 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732040976896Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bde5e3a: reopening flushed file at 1732040976913 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bf77160: reopening flushed file at 1732040976924 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cc4a74e: reopening flushed file at 1732040976936 (+12 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1732040976949 (+13 ms)Writing region close event to WAL at 1732040976950 (+1 ms)Running coprocessor post-close hooks at 1732040976956 (+6 ms)Closed at 1732040976956 2024-11-19T18:29:36,957 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:36,994 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,42803,1732040882153; all regions closed. 
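[editor's note] The "Waiting on 2 regions to close" ... "all regions closed" progression earlier in the log is a drain loop: the server keeps reporting the set of still-open regions until the close handlers have removed them all. A generic sketch of that loop follows; it is not HRegionServer's implementation, just the shape of the behavior the messages describe.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class WaitForRegionClosesSketch {
  private final Map<String, Object> onlineRegions = new ConcurrentHashMap<>();

  // Called by close handlers as each region finishes closing.
  void regionClosed(String encodedName) {
    onlineRegions.remove(encodedName);
  }

  void waitForAllRegionsClosed() throws InterruptedException {
    while (!onlineRegions.isEmpty()) {
      // Mirrors "Waiting on N regions to close" / "Waiting on <encoded names>".
      System.out.println("Waiting on " + onlineRegions.size()
          + " regions to close: " + onlineRegions.keySet());
      Thread.sleep(100);
    }
    System.out.println("all regions closed");
  }
}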
2024-11-19T18:29:36,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:36,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:36,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:36,997 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:36,997 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741834_1010 (size=3066) 2024-11-19T18:29:37,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741834_1010 (size=3066) 2024-11-19T18:29:37,004 DEBUG [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs 2024-11-19T18:29:37,004 INFO [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C42803%2C1732040882153.meta:.meta(num 1732040883838) 2024-11-19T18:29:37,005 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,005 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,005 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,005 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,005 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741847_1023 (size=12695) 2024-11-19T18:29:37,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741847_1023 (size=12695) 2024-11-19T18:29:37,012 DEBUG [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/oldWALs 2024-11-19T18:29:37,012 INFO [RS:0;30db5f576be8:42803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C42803%2C1732040882153:(num 1732040956740) 2024-11-19T18:29:37,012 DEBUG [RS:0;30db5f576be8:42803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:37,012 INFO [RS:0;30db5f576be8:42803 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:29:37,013 INFO [RS:0;30db5f576be8:42803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:29:37,013 INFO [RS:0;30db5f576be8:42803 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T18:29:37,013 INFO [RS:0;30db5f576be8:42803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:29:37,013 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
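[editor's note] The repeated "sync.N ... interrupted" lines above come from WAL sync runner threads that block on a queue of sync requests and exit when WAL shutdown interrupts them; the "Potentially hanging thread: sync.N" dump at the end of this log shows exactly that state (LinkedBlockingQueue.take inside FSHLog$SyncRunner.takeSyncRequest). Below is a generic sketch of that worker pattern, not FSHLog's actual implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class SyncRunnerSketch implements Runnable {
  private final BlockingQueue<Runnable> syncRequests = new LinkedBlockingQueue<>();
  private final String name;

  SyncRunnerSketch(String name) {
    this.name = name;
  }

  void submit(Runnable syncRequest) {
    syncRequests.add(syncRequest);
  }

  @Override
  public void run() {
    try {
      while (true) {
        // Parks here until a sync request arrives; this is the blocked state
        // the thread dump shows for the leftover sync.N threads.
        Runnable request = syncRequests.take();
        request.run();
      }
    } catch (InterruptedException e) {
      System.out.println(name + " interrupted"); // mirrors "sync.N ... interrupted"
      Thread.currentThread().interrupt();
    }
  }
}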
2024-11-19T18:29:37,014 INFO [RS:0;30db5f576be8:42803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42803 2024-11-19T18:29:37,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,42803,1732040882153 2024-11-19T18:29:37,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:29:37,018 INFO [RS:0;30db5f576be8:42803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:29:37,019 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,42803,1732040882153] 2024-11-19T18:29:37,022 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,42803,1732040882153 already deleted, retry=false 2024-11-19T18:29:37,023 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,42803,1732040882153 expired; onlineServers=0 2024-11-19T18:29:37,023 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,46305,1732040881439' ***** 2024-11-19T18:29:37,023 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:29:37,023 INFO [M:0;30db5f576be8:46305 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:29:37,023 INFO [M:0;30db5f576be8:46305 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:29:37,023 DEBUG [M:0;30db5f576be8:46305 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:29:37,023 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
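[editor's note] The RegionServerTracker lines above show the master learning of the region server's exit through ZooKeeper: the server's ephemeral znode under /hbase/rs disappears, and the watcher receives a NodeDeleted event for that path. The sketch below illustrates the mechanism with the plain ZooKeeper Watcher API; it is not HBase's ZKWatcher/RegionServerTracker code, and the expiration handling is reduced to a print statement.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

class RegionServerTrackerSketch implements Watcher {
  private static final String RS_ZNODE_PREFIX = "/hbase/rs/"; // base path seen in the log

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Watcher.Event.EventType.NodeDeleted
        && event.getPath() != null
        && event.getPath().startsWith(RS_ZNODE_PREFIX)) {
      String serverName = event.getPath().substring(RS_ZNODE_PREFIX.length());
      // Corresponds to "RegionServer ephemeral node deleted, processing expiration [...]".
      System.out.println("processing expiration of " + serverName);
    }
  }
}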
2024-11-19T18:29:37,023 DEBUG [M:0;30db5f576be8:46305 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:29:37,023 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040883111 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040883111,5,FailOnTimeoutGroup] 2024-11-19T18:29:37,023 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040883110 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040883110,5,FailOnTimeoutGroup] 2024-11-19T18:29:37,024 INFO [M:0;30db5f576be8:46305 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:29:37,024 INFO [M:0;30db5f576be8:46305 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:29:37,024 DEBUG [M:0;30db5f576be8:46305 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:29:37,024 INFO [M:0;30db5f576be8:46305 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:29:37,024 INFO [M:0;30db5f576be8:46305 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:29:37,025 INFO [M:0;30db5f576be8:46305 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:29:37,025 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:29:37,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:29:37,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:37,026 DEBUG [M:0;30db5f576be8:46305 {}] zookeeper.ZKUtil(347): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:29:37,026 WARN [M:0;30db5f576be8:46305 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:29:37,027 INFO [M:0;30db5f576be8:46305 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/.lastflushedseqids 2024-11-19T18:29:37,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741854_1030 (size=130) 2024-11-19T18:29:37,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741854_1030 (size=130) 2024-11-19T18:29:37,044 INFO [M:0;30db5f576be8:46305 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:29:37,044 INFO [M:0;30db5f576be8:46305 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:29:37,044 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:29:37,044 INFO [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:37,044 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:37,044 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:29:37,045 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:37,045 INFO [M:0;30db5f576be8:46305 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-11-19T18:29:37,066 DEBUG [M:0;30db5f576be8:46305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fb13fc159d844570a613c694e3b99cee is 82, key is hbase:meta,,1/info:regioninfo/1732040883910/Put/seqid=0 2024-11-19T18:29:37,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741855_1031 (size=5672) 2024-11-19T18:29:37,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741855_1031 (size=5672) 2024-11-19T18:29:37,074 INFO [M:0;30db5f576be8:46305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fb13fc159d844570a613c694e3b99cee 2024-11-19T18:29:37,103 DEBUG [M:0;30db5f576be8:46305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1502984f79ed42c4b828bd3e852f323a is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732040884722/Put/seqid=0 2024-11-19T18:29:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741856_1032 (size=6246) 2024-11-19T18:29:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741856_1032 (size=6246) 2024-11-19T18:29:37,112 INFO [M:0;30db5f576be8:46305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1502984f79ed42c4b828bd3e852f323a 2024-11-19T18:29:37,119 INFO [M:0;30db5f576be8:46305 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1502984f79ed42c4b828bd3e852f323a 2024-11-19T18:29:37,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:37,122 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42803-0x101317c5c810001, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:37,123 INFO [RS:0;30db5f576be8:42803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:29:37,123 INFO [RS:0;30db5f576be8:42803 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,42803,1732040882153; zookeeper connection closed. 2024-11-19T18:29:37,123 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@acaa498 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@acaa498 2024-11-19T18:29:37,124 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:29:37,136 DEBUG [M:0;30db5f576be8:46305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa2d2ab2b8804e3d9f3eb31936c04983 is 69, key is 30db5f576be8,42803,1732040882153/rs:state/1732040883179/Put/seqid=0 2024-11-19T18:29:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741857_1033 (size=5156) 2024-11-19T18:29:37,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741857_1033 (size=5156) 2024-11-19T18:29:37,144 INFO [M:0;30db5f576be8:46305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa2d2ab2b8804e3d9f3eb31936c04983 2024-11-19T18:29:37,170 DEBUG [M:0;30db5f576be8:46305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb8ca6772a2a4207bccce0ca2c4f75bb is 52, key is load_balancer_on/state:d/1732040884225/Put/seqid=0 2024-11-19T18:29:37,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741858_1034 (size=5056) 2024-11-19T18:29:37,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741858_1034 (size=5056) 2024-11-19T18:29:37,177 INFO [M:0;30db5f576be8:46305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb8ca6772a2a4207bccce0ca2c4f75bb 2024-11-19T18:29:37,185 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fb13fc159d844570a613c694e3b99cee as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fb13fc159d844570a613c694e3b99cee 2024-11-19T18:29:37,191 INFO [M:0;30db5f576be8:46305 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fb13fc159d844570a613c694e3b99cee, entries=8, sequenceid=59, filesize=5.5 K 2024-11-19T18:29:37,193 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1502984f79ed42c4b828bd3e852f323a as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1502984f79ed42c4b828bd3e852f323a 2024-11-19T18:29:37,201 INFO [M:0;30db5f576be8:46305 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1502984f79ed42c4b828bd3e852f323a 2024-11-19T18:29:37,201 INFO [M:0;30db5f576be8:46305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1502984f79ed42c4b828bd3e852f323a, entries=6, sequenceid=59, filesize=6.1 K 2024-11-19T18:29:37,202 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa2d2ab2b8804e3d9f3eb31936c04983 as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aa2d2ab2b8804e3d9f3eb31936c04983 2024-11-19T18:29:37,210 INFO [M:0;30db5f576be8:46305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aa2d2ab2b8804e3d9f3eb31936c04983, entries=1, sequenceid=59, filesize=5.0 K 2024-11-19T18:29:37,211 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb8ca6772a2a4207bccce0ca2c4f75bb as hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb8ca6772a2a4207bccce0ca2c4f75bb 2024-11-19T18:29:37,219 INFO [M:0;30db5f576be8:46305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb8ca6772a2a4207bccce0ca2c4f75bb, entries=1, sequenceid=59, filesize=4.9 K 2024-11-19T18:29:37,221 INFO [M:0;30db5f576be8:46305 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 176ms, sequenceid=59, compaction requested=false 2024-11-19T18:29:37,223 INFO [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:29:37,223 DEBUG [M:0;30db5f576be8:46305 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732040977044Disabling compacts and flushes for region at 1732040977044Disabling writes for close at 1732040977044Obtaining lock to block concurrent updates at 1732040977045 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732040977045Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1732040977046 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732040977047 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732040977047Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732040977065 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732040977065Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732040977082 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732040977102 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732040977102Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732040977119 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732040977136 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732040977136Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732040977152 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732040977169 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732040977169Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bd6f546: reopening flushed file at 1732040977184 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77a95cd7: reopening flushed file at 1732040977192 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bb797fc: reopening flushed file at 1732040977201 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ece784c: reopening flushed file at 1732040977210 (+9 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 176ms, sequenceid=59, compaction requested=false at 1732040977221 (+11 ms)Writing region close event to WAL at 1732040977223 (+2 ms)Closed at 1732040977223 2024-11-19T18:29:37,224 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,224 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,224 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:37,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34527 is added to blk_1073741830_1006 (size=27961) 2024-11-19T18:29:37,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45723 is added to blk_1073741830_1006 (size=27961) 2024-11-19T18:29:37,228 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:29:37,228 INFO [M:0;30db5f576be8:46305 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T18:29:37,228 INFO [M:0;30db5f576be8:46305 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46305 2024-11-19T18:29:37,228 INFO [M:0;30db5f576be8:46305 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:29:37,263 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:29:37,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:37,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46305-0x101317c5c810000, quorum=127.0.0.1:64665, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:37,330 INFO [M:0;30db5f576be8:46305 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:29:37,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ca1952e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:37,338 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:37,338 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:37,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:37,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:37,342 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:37,342 WARN [BP-342907355-172.17.0.2-1732040878628 heartbeating to localhost/127.0.0.1:34103 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:37,342 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:37,342 WARN [BP-342907355-172.17.0.2-1732040878628 heartbeating to localhost/127.0.0.1:34103 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342907355-172.17.0.2-1732040878628 (Datanode Uuid 624bf150-708b-42e5-a448-0cf2d7b3e7a6) service to localhost/127.0.0.1:34103 2024-11-19T18:29:37,344 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data3/current/BP-342907355-172.17.0.2-1732040878628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:37,344 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data4/current/BP-342907355-172.17.0.2-1732040878628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:37,345 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:37,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ca8488f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:37,354 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:37,354 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:37,354 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:37,354 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:37,356 WARN [BP-342907355-172.17.0.2-1732040878628 heartbeating to localhost/127.0.0.1:34103 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:37,356 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:37,356 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:37,356 WARN [BP-342907355-172.17.0.2-1732040878628 heartbeating to localhost/127.0.0.1:34103 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342907355-172.17.0.2-1732040878628 (Datanode Uuid 68bc039d-f2d9-424a-8e87-ba7b1931e499) service to localhost/127.0.0.1:34103 2024-11-19T18:29:37,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data1/current/BP-342907355-172.17.0.2-1732040878628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:37,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/cluster_c64829ee-3a2a-a740-e1f5-c4a5e706aacd/data/data2/current/BP-342907355-172.17.0.2-1732040878628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:37,357 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:37,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:29:37,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:37,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:37,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:37,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:37,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:29:37,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:29:37,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/30db5f576be8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34103 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@53abf30 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34103 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:34103 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34103 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34103 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/30db5f576be8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/30db5f576be8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=256 (was 156) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7426 (was 7404) - AvailableMemoryMB LEAK? - 2024-11-19T18:29:37,426 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=256, ProcessCount=11, AvailableMemoryMB=7426 2024-11-19T18:29:37,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.log.dir so I do NOT create it in target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c58bcbe0-adea-bc05-f37c-ca499de180b7/hadoop.tmp.dir so I do NOT create it in target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec, deleteOnExit=true 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/test.cache.data in system properties and HBase conf 2024-11-19T18:29:37,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:29:37,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir in system properties and HBase 
conf 2024-11-19T18:29:37,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:29:37,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:29:37,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:29:37,428 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:29:37,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/dfs.journalnode.edits.dir in 
system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:29:37,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:29:37,447 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:29:37,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:37,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:37,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:37,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:37,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:29:37,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:37,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a78e1c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:37,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@534816f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:37,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@788684ca{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/java.io.tmpdir/jetty-localhost-43751-hadoop-hdfs-3_4_1-tests_jar-_-any-13184454476643144020/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:29:37,675 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ae773f8{HTTP/1.1, (http/1.1)}{localhost:43751} 2024-11-19T18:29:37,675 INFO [Time-limited test {}] server.Server(415): Started @101058ms 2024-11-19T18:29:37,690 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:29:37,763 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:37,767 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:37,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:37,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:37,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:29:37,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a0544f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:37,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43fff29e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:37,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65506a11{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/java.io.tmpdir/jetty-localhost-34137-hadoop-hdfs-3_4_1-tests_jar-_-any-7559020586317427954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:37,889 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@670e4080{HTTP/1.1, (http/1.1)}{localhost:34137} 2024-11-19T18:29:37,889 INFO [Time-limited test {}] server.Server(415): Started @101271ms 2024-11-19T18:29:37,891 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:29:37,936 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:37,941 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:37,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:37,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:37,941 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:29:37,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@81550dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:37,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cc26d13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:38,001 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data1/current/BP-1966740921-172.17.0.2-1732040977466/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:38,001 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data2/current/BP-1966740921-172.17.0.2-1732040977466/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:38,030 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:29:38,033 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x416f1a2f74629af7 with lease ID 0x1e2baafe0873a652: Processing first storage report for DS-8dea9964-4ad8-44a4-bd64-0a83db3998f2 from datanode DatanodeRegistration(127.0.0.1:39567, datanodeUuid=84c4361a-e4e5-4575-93d9-fac89bfe7245, infoPort=43761, infoSecurePort=0, ipcPort=44283, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466) 2024-11-19T18:29:38,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x416f1a2f74629af7 with lease ID 0x1e2baafe0873a652: from storage DS-8dea9964-4ad8-44a4-bd64-0a83db3998f2 node DatanodeRegistration(127.0.0.1:39567, datanodeUuid=84c4361a-e4e5-4575-93d9-fac89bfe7245, infoPort=43761, infoSecurePort=0, ipcPort=44283, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:38,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x416f1a2f74629af7 with lease ID 0x1e2baafe0873a652: Processing first storage report for DS-3052dc5f-700f-4dc2-ab94-3901c588a02d from datanode DatanodeRegistration(127.0.0.1:39567, datanodeUuid=84c4361a-e4e5-4575-93d9-fac89bfe7245, infoPort=43761, infoSecurePort=0, ipcPort=44283, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466) 2024-11-19T18:29:38,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x416f1a2f74629af7 with lease ID 0x1e2baafe0873a652: from storage DS-3052dc5f-700f-4dc2-ab94-3901c588a02d node DatanodeRegistration(127.0.0.1:39567, datanodeUuid=84c4361a-e4e5-4575-93d9-fac89bfe7245, infoPort=43761, infoSecurePort=0, ipcPort=44283, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:38,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c89f457{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/java.io.tmpdir/jetty-localhost-46675-hadoop-hdfs-3_4_1-tests_jar-_-any-249416790108628566/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:38,077 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18f27499{HTTP/1.1, (http/1.1)}{localhost:46675} 2024-11-19T18:29:38,077 INFO [Time-limited test {}] server.Server(415): Started @101460ms 2024-11-19T18:29:38,079 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
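The DataNode web UIs, block-pool scans and first block reports above are the kind of output produced by Hadoop's MiniDFSCluster test harness (from the hadoop-hdfs tests artifact), which the HBase test utility drives under the hood. The sketch below is an illustration only, not code from this test; the base directory and the two-DataNode count are assumptions chosen to mirror the two DataNodes reporting storage in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Base directory for NameNode/DataNode storage; the path is illustrative.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // the log shows two DataNodes sending block reports
            .build();
        cluster.waitActive(); // block until the NameNode and DataNodes are serving
        System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
        cluster.shutdown();
      }
    }

In the logged run these pieces are not started directly; the HBase testing utility wraps them, as sketched further below.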
2024-11-19T18:29:38,189 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data3/current/BP-1966740921-172.17.0.2-1732040977466/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:38,189 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data4/current/BP-1966740921-172.17.0.2-1732040977466/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:38,207 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:29:38,210 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x408b16e9feee593f with lease ID 0x1e2baafe0873a653: Processing first storage report for DS-06d425be-7392-4d6a-ab50-7a0cd323f153 from datanode DatanodeRegistration(127.0.0.1:40973, datanodeUuid=de27e5fc-c4d8-440f-b49a-951e7d553d45, infoPort=38263, infoSecurePort=0, ipcPort=40189, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466) 2024-11-19T18:29:38,210 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x408b16e9feee593f with lease ID 0x1e2baafe0873a653: from storage DS-06d425be-7392-4d6a-ab50-7a0cd323f153 node DatanodeRegistration(127.0.0.1:40973, datanodeUuid=de27e5fc-c4d8-440f-b49a-951e7d553d45, infoPort=38263, infoSecurePort=0, ipcPort=40189, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:38,210 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x408b16e9feee593f with lease ID 0x1e2baafe0873a653: Processing first storage report for DS-be8c4f79-4536-4734-b32a-64c80e5134c5 from datanode DatanodeRegistration(127.0.0.1:40973, datanodeUuid=de27e5fc-c4d8-440f-b49a-951e7d553d45, infoPort=38263, infoSecurePort=0, ipcPort=40189, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466) 2024-11-19T18:29:38,210 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x408b16e9feee593f with lease ID 0x1e2baafe0873a653: from storage DS-be8c4f79-4536-4734-b32a-64c80e5134c5 node DatanodeRegistration(127.0.0.1:40973, datanodeUuid=de27e5fc-c4d8-440f-b49a-951e7d553d45, infoPort=38263, infoSecurePort=0, ipcPort=40189, storageInfo=lv=-57;cid=testClusterID;nsid=2145645413;c=1732040977466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:38,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f 2024-11-19T18:29:38,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/zookeeper_0, clientPort=55695, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:29:38,314 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55695 2024-11-19T18:29:38,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,316 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:29:38,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:29:38,329 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef with version=8 2024-11-19T18:29:38,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:29:38,333 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:29:38,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,333 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:29:38,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:29:38,334 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:29:38,334 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:29:38,336 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36205 2024-11-19T18:29:38,338 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36205 connecting to ZooKeeper ensemble=127.0.0.1:55695 2024-11-19T18:29:38,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362050x0, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:29:38,348 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36205-0x101317dda180000 connected 2024-11-19T18:29:38,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:38,382 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef, hbase.cluster.distributed=false 2024-11-19T18:29:38,384 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:29:38,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36205 2024-11-19T18:29:38,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36205 2024-11-19T18:29:38,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36205 2024-11-19T18:29:38,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36205 2024-11-19T18:29:38,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36205 2024-11-19T18:29:38,406 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:29:38,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,406 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:29:38,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:38,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:29:38,407 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:29:38,407 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:29:38,408 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42223 2024-11-19T18:29:38,409 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42223 connecting to ZooKeeper ensemble=127.0.0.1:55695 2024-11-19T18:29:38,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,413 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422230x0, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:29:38,421 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422230x0, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:38,422 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:29:38,424 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42223-0x101317dda180001 connected 2024-11-19T18:29:38,428 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:29:38,429 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:29:38,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:29:38,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42223 2024-11-19T18:29:38,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42223 2024-11-19T18:29:38,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42223 2024-11-19T18:29:38,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42223 2024-11-19T18:29:38,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42223 2024-11-19T18:29:38,455 
DEBUG [M:0;30db5f576be8:36205 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:36205 2024-11-19T18:29:38,455 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:38,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:38,458 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:29:38,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,460 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:29:38,461 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,36205,1732040978332 from backup master directory 2024-11-19T18:29:38,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:38,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:38,464 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T18:29:38,464 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,470 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/hbase.id] with ID: a384b180-7b21-4843-a993-55743a0d5a26 2024-11-19T18:29:38,470 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/.tmp/hbase.id 2024-11-19T18:29:38,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:29:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:29:38,483 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/.tmp/hbase.id]:[hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/hbase.id] 2024-11-19T18:29:38,503 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:38,504 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:29:38,505 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
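For context: mini HDFS, a MiniZooKeeperCluster on an ephemeral client port, and a master registering itself as active is the sequence HBaseTestingUtil produces when a test brings up a single-node mini cluster. The sketch below is a hedged illustration of that entry point, assuming the stock hbase-testing-util API (HBaseTestingUtil with startMiniCluster/shutdownMiniCluster); it is not code taken from this test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Starts mini DFS, a MiniZooKeeperCluster and one master plus one
        // regionserver -- the same single-node layout this log records.
        util.startMiniCluster();
        try {
          // a test body would exercise the cluster here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }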
2024-11-19T18:29:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:29:38,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:29:38,529 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:29:38,530 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:29:38,530 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:38,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:29:38,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:29:38,547 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store 2024-11-19T18:29:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:29:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:29:38,557 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:38,557 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:29:38,557 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:38,557 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:38,558 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:29:38,558 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:38,558 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
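The column-family attributes printed for 'master:store' above (VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING, BLOOMFILTER and so on) correspond to settings on the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API. As a hedged illustration only, the sketch below builds a descriptor with the same settings the log shows for the 'info' family; the table name 'example' is hypothetical, and this is not how the master store region itself is created.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      // Mirrors the 'info' family settings printed in the log above.
      public static TableDescriptor example() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
      }
    }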
2024-11-19T18:29:38,558 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732040978557Disabling compacts and flushes for region at 1732040978557Disabling writes for close at 1732040978558 (+1 ms)Writing region close event to WAL at 1732040978558Closed at 1732040978558 2024-11-19T18:29:38,559 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/.initializing 2024-11-19T18:29:38,559 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/WALs/30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,563 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C36205%2C1732040978332, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/WALs/30db5f576be8,36205,1732040978332, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/oldWALs, maxLogs=10 2024-11-19T18:29:38,564 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C36205%2C1732040978332.1732040978563 2024-11-19T18:29:38,570 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/WALs/30db5f576be8,36205,1732040978332/30db5f576be8%2C36205%2C1732040978332.1732040978563 2024-11-19T18:29:38,570 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43761:43761),(127.0.0.1/127.0.0.1:38263:38263)] 2024-11-19T18:29:38,571 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:29:38,571 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:38,571 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,572 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:29:38,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:38,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:29:38,578 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:38,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:29:38,581 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:38,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:29:38,583 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:38,584 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,585 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,585 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,587 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,587 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,587 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:29:38,589 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:38,591 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:29:38,592 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817712, jitterRate=0.03977504372596741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:29:38,593 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732040978572Initializing all the Stores at 1732040978573 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040978573Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040978574 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040978574Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040978574Cleaning up temporary data from old regions at 1732040978587 (+13 ms)Region opened successfully at 1732040978593 (+6 ms) 2024-11-19T18:29:38,593 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:29:38,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698bd96b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:29:38,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:29:38,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:29:38,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:29:38,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:29:38,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:29:38,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:29:38,601 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:29:38,605 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:29:38,606 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:29:38,607 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:29:38,608 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:29:38,609 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:29:38,610 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:29:38,610 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:29:38,611 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:29:38,613 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:29:38,614 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:29:38,615 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:29:38,617 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:29:38,618 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:29:38,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:38,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:38,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,621 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,36205,1732040978332, sessionid=0x101317dda180000, setting cluster-up flag (Was=false) 2024-11-19T18:29:38,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,631 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:29:38,632 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:38,645 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:29:38,646 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,36205,1732040978332 2024-11-19T18:29:38,647 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:29:38,650 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:38,650 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:29:38,650 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:29:38,650 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,36205,1732040978332 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:29:38,652 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:29:38,654 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041008654 2024-11-19T18:29:38,654 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:29:38,654 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:29:38,654 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:29:38,654 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:38,654 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:29:38,655 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
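The LogsCleaner and CleanerChore entries above are periodic background chores the master schedules at fixed periods (600000 ms for LogsCleaner). The sketch below is only a plain-Java analogue of such a chore using ScheduledExecutorService, not HBase's internal ChoreService/ScheduledChore classes; the task body is invented for illustration.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogCleanerChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Periodic cleanup task, fired every 600000 ms like the LogsCleaner chore above.
        pool.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs and deleting expired files"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // A real chore would also be cancelled on shutdown; omitted in this sketch.
    }
}
```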
2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:29:38,655 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:29:38,656 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:29:38,656 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:29:38,656 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,656 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040978656,5,FailOnTimeoutGroup] 2024-11-19T18:29:38,656 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:29:38,661 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040978656,5,FailOnTimeoutGroup] 2024-11-19T18:29:38,661 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,661 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:29:38,661 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,661 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:29:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:29:38,668 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:29:38,668 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef 2024-11-19T18:29:38,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:29:38,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:29:38,682 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:38,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:29:38,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:29:38,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:38,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:29:38,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:29:38,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:38,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:29:38,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:29:38,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:38,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:29:38,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:29:38,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:38,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:38,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:29:38,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740 2024-11-19T18:29:38,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740 2024-11-19T18:29:38,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:29:38,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:29:38,699 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:29:38,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:29:38,705 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:29:38,705 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723237, jitterRate=-0.08035707473754883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:29:38,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732040978682Initializing all the Stores at 1732040978684 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040978684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040978686 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040978686Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040978686Cleaning up temporary data from old regions at 1732040978698 (+12 ms)Region opened successfully at 1732040978707 (+9 ms) 2024-11-19T18:29:38,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:29:38,707 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:29:38,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:29:38,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:29:38,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:29:38,708 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:38,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732040978707Disabling compacts and flushes for region at 
1732040978707Disabling writes for close at 1732040978707Writing region close event to WAL at 1732040978708 (+1 ms)Closed at 1732040978708 2024-11-19T18:29:38,709 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:38,709 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:29:38,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:29:38,712 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:29:38,713 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:29:38,743 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(746): ClusterId : a384b180-7b21-4843-a993-55743a0d5a26 2024-11-19T18:29:38,744 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:29:38,747 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:29:38,747 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:29:38,749 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:29:38,750 DEBUG [RS:0;30db5f576be8:42223 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c020847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:29:38,769 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:42223 2024-11-19T18:29:38,770 INFO [RS:0;30db5f576be8:42223 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:29:38,770 INFO [RS:0;30db5f576be8:42223 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:29:38,770 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(832): About to register with Master. 
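The FSTableDescriptors/HRegion lines above record the hbase:meta schema: column families info, ns, rep_barrier and table, all in-memory, ROW_INDEX_V1 encoded, with ROWCOL bloom filters and mostly 8 KB blocks. Below is a minimal sketch of building a comparable descriptor with the public client API, assuming recent HBase TableDescriptorBuilder/ColumnFamilyDescriptorBuilder; the table name "demo" is a placeholder, since hbase:meta itself is created internally by InitMetaProcedure.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() {
        // Mirrors the 'info' family settings logged above: 3 versions, in-memory,
        // ROW_INDEX_V1 block encoding, ROWCOL bloom filter, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();
        // "demo" is a placeholder table name for illustration only.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
    }
}
```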
2024-11-19T18:29:38,771 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,36205,1732040978332 with port=42223, startcode=1732040978406 2024-11-19T18:29:38,771 DEBUG [RS:0;30db5f576be8:42223 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:29:38,774 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:29:38,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36205 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36205 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,778 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef 2024-11-19T18:29:38,778 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40287 2024-11-19T18:29:38,778 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:29:38,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:29:38,782 DEBUG [RS:0;30db5f576be8:42223 {}] zookeeper.ZKUtil(111): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,782 WARN [RS:0;30db5f576be8:42223 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:29:38,782 INFO [RS:0;30db5f576be8:42223 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:38,782 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,42223,1732040978406] 2024-11-19T18:29:38,782 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/WALs/30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,787 INFO [RS:0;30db5f576be8:42223 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:29:38,790 INFO [RS:0;30db5f576be8:42223 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:29:38,791 INFO [RS:0;30db5f576be8:42223 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:29:38,791 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
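Several configuration keys surface in the exchange above: hbase.rootdir, fs.defaultFS and hbase.master.info.port are pushed from the master to the region server, and hbase.regions.recovery.store.file.ref.count is mentioned earlier as the switch for reopening high-refcount regions. A hedged sketch of setting these on a client-side HBaseConfiguration follows; the values are placeholders, not the ones used by this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Properties that appear in the startup log above; values are placeholders.
        conf.set("hbase.rootdir", "hdfs://localhost:40287/hbase");
        conf.set("fs.defaultFS", "hdfs://localhost:40287");
        conf.setInt("hbase.master.info.port", -1); // -1 disables the master info UI
        // A value > 0 enables reopening regions with very high storeFileRefCount (see log above).
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
    }
}
```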
2024-11-19T18:29:38,791 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:29:38,792 INFO [RS:0;30db5f576be8:42223 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:29:38,793 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,793 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:38,794 DEBUG [RS:0;30db5f576be8:42223 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:38,800 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
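Each "Starting executor service" line above describes a bounded handler pool with fixed corePoolSize/maxPoolSize (for example RS_OPEN_REGION at 1/1 and RS_LOG_REPLAY_OPS at 2/2). The snippet below is just a plain java.util.concurrent analogue of such a pool, not the HBase ExecutorService implementation; the submitted task is invented for illustration.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class HandlerPoolSketch {
    public static ThreadPoolExecutor newPool(int coreSize, int maxSize) {
        // Analogue of an executor like RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1):
        // a small pool that queues events while all handlers are busy.
        return new ThreadPoolExecutor(coreSize, maxSize,
            60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegionPool = newPool(1, 1);
        openRegionPool.submit(() -> System.out.println("open region event handled"));
        openRegionPool.shutdown();
    }
}
```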
2024-11-19T18:29:38,800 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,800 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,800 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,800 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,801 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,42223,1732040978406-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:29:38,820 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:29:38,820 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,42223,1732040978406-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,820 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,821 INFO [RS:0;30db5f576be8:42223 {}] regionserver.Replication(171): 30db5f576be8,42223,1732040978406 started 2024-11-19T18:29:38,845 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:38,845 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,42223,1732040978406, RpcServer on 30db5f576be8/172.17.0.2:42223, sessionid=0x101317dda180001 2024-11-19T18:29:38,846 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:29:38,846 DEBUG [RS:0;30db5f576be8:42223 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,846 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,42223,1732040978406' 2024-11-19T18:29:38,846 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:29:38,847 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:29:38,849 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:29:38,849 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:29:38,849 DEBUG [RS:0;30db5f576be8:42223 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,42223,1732040978406 2024-11-19T18:29:38,849 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,42223,1732040978406' 2024-11-19T18:29:38,849 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:29:38,849 DEBUG 
[RS:0;30db5f576be8:42223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:29:38,850 DEBUG [RS:0;30db5f576be8:42223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:29:38,850 INFO [RS:0;30db5f576be8:42223 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:29:38,850 INFO [RS:0;30db5f576be8:42223 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:29:38,864 WARN [30db5f576be8:36205 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:29:38,952 INFO [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C42223%2C1732040978406, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/WALs/30db5f576be8,42223,1732040978406, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/oldWALs, maxLogs=32 2024-11-19T18:29:38,954 INFO [RS:0;30db5f576be8:42223 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42223%2C1732040978406.1732040978954 2024-11-19T18:29:38,961 INFO [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/WALs/30db5f576be8,42223,1732040978406/30db5f576be8%2C42223%2C1732040978406.1732040978954 2024-11-19T18:29:38,962 DEBUG [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43761:43761),(127.0.0.1/127.0.0.1:38263:38263)] 2024-11-19T18:29:39,114 DEBUG [30db5f576be8:36205 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:29:39,115 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,42223,1732040978406 2024-11-19T18:29:39,116 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,42223,1732040978406, state=OPENING 2024-11-19T18:29:39,118 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:29:39,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:39,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:39,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:39,121 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:39,121 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:29:39,121 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,42223,1732040978406}] 2024-11-19T18:29:39,275 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:29:39,277 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35979, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:29:39,282 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:29:39,282 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:39,285 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C42223%2C1732040978406.meta, suffix=.meta, logDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/WALs/30db5f576be8,42223,1732040978406, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/oldWALs, maxLogs=32 2024-11-19T18:29:39,287 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C42223%2C1732040978406.meta.1732040979287.meta 2024-11-19T18:29:39,295 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/WALs/30db5f576be8,42223,1732040978406/30db5f576be8%2C42223%2C1732040978406.meta.1732040979287.meta 2024-11-19T18:29:39,296 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43761:43761),(127.0.0.1/127.0.0.1:38263:38263)] 2024-11-19T18:29:39,297 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:29:39,297 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:29:39,298 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:29:39,298 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
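The meta region open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint because the hbase:meta descriptor declares it as coprocessor$1. The sketch below shows how the same endpoint could be attached to an ordinary table descriptor through the public client API, assuming TableDescriptorBuilder.setCoprocessor(String); the "demo" table is a placeholder.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
    public static TableDescriptor build() throws Exception {
        // "demo" is a placeholder; hbase:meta declares this endpoint in its own descriptor,
        // which is why RegionCoprocessorHost loads it when the meta region opens (see log).
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}
```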
2024-11-19T18:29:39,298 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:29:39,298 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:39,298 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:29:39,298 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:29:39,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:29:39,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:29:39,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:39,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:39,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:29:39,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:29:39,303 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:39,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:39,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:29:39,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:29:39,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:39,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:39,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:29:39,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:29:39,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:39,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
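The CompactionConfiguration lines above repeat the store-level defaults for each column family: minCompactSize 128 MB, 3 to 10 files per minor compaction, ratio 1.2. The sketch below is a deliberately simplified illustration of how such a size-ratio test can prune an oversized file from a candidate list; it is not HBase's ExploringCompactionPolicy, and sizes are treated as plain MB values for readability.

```java
import java.util.ArrayList;
import java.util.List;

public class RatioCheckSketch {
    // Simplified illustration of the size-ratio test behind "ratio 1.200000" and
    // "minCompactSize:128 MB" in the log above; a sketch, not the real selection policy.
    public static List<Long> select(List<Long> sizesMbOldestFirst, double ratio, long minCompactSizeMb) {
        List<Long> candidates = new ArrayList<>(sizesMbOldestFirst);
        while (!candidates.isEmpty()) {
            long first = candidates.get(0);
            long sumOfRest = candidates.stream().skip(1).mapToLong(Long::longValue).sum();
            // A file is skipped when it is much larger than everything after it.
            if (first > Math.max(minCompactSizeMb, (long) (ratio * sumOfRest))) {
                candidates.remove(0);
            } else {
                break;
            }
        }
        return candidates;
    }

    public static void main(String[] args) {
        // The 400 MB file is excluded; the three smaller files remain candidates.
        System.out.println(select(List.of(400L, 100L, 90L, 80L), 1.2, 128L)); // [100, 90, 80]
    }
}
```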
2024-11-19T18:29:39,307 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:29:39,308 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740 2024-11-19T18:29:39,309 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740 2024-11-19T18:29:39,311 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:29:39,311 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:29:39,312 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:29:39,314 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:29:39,315 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770979, jitterRate=-0.019650503993034363}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:29:39,315 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:29:39,317 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732040979298Writing region info on filesystem at 1732040979298Initializing all the Stores at 1732040979299 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040979300 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040979300Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040979300Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040979300Cleaning up temporary data from old regions at 1732040979311 (+11 ms)Running coprocessor post-open hooks at 1732040979315 (+4 ms)Region opened successfully at 1732040979317 (+2 ms) 2024-11-19T18:29:39,320 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732040979274 2024-11-19T18:29:39,323 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:29:39,323 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:29:39,324 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,42223,1732040978406 2024-11-19T18:29:39,325 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,42223,1732040978406, state=OPEN 2024-11-19T18:29:39,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:29:39,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:29:39,333 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,42223,1732040978406 2024-11-19T18:29:39,333 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:39,333 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:39,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:29:39,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,42223,1732040978406 in 212 msec 2024-11-19T18:29:39,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:29:39,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 628 msec 2024-11-19T18:29:39,342 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:39,342 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:29:39,344 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:29:39,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,42223,1732040978406, seqNum=-1] 2024-11-19T18:29:39,345 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:29:39,347 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:29:39,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 704 msec 2024-11-19T18:29:39,355 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732040979355, completionTime=-1 2024-11-19T18:29:39,355 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:29:39,356 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:29:39,358 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:29:39,358 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041039358 2024-11-19T18:29:39,358 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041099358 2024-11-19T18:29:39,358 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-19T18:29:39,358 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:39,359 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:39,359 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:39,359 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:36205, period=300000, unit=MILLISECONDS is enabled. 
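Before creating the default and hbase namespaces, the PEWorker above fetches the hbase:meta region location from the connection registry. Client code can resolve the same location through the public RegionLocator API, as in the hedged sketch below; it assumes an hbase-site.xml on the classpath pointing at a running cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is available
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Resolves hbase:meta,,1 to its hosting region server, much like the
            // "fetched meta region location" lines above.
            HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
        }
    }
}
```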
2024-11-19T18:29:39,359 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:39,359 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:39,361 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.900sec 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:29:39,365 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:29:39,368 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:29:39,368 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:29:39,368 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,36205,1732040978332-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
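The ChoreService lines above register the master's periodic maintenance tasks (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, RollingUpgradeChore, ...) by name, period and time unit. A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService classes; the chore name, period and stopper below are illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        ChoreService service = new ChoreService("demo");  // prefix for the chore threads
        Stoppable stopper = new Stoppable() {             // minimal stopper; real servers pass themselves
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Hypothetical chore with period=60000, unit=MILLISECONDS, matching the entries above.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 60_000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            // periodic work would go here
          }
        };
        service.scheduleChore(chore);
      }
    }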
2024-11-19T18:29:39,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e721f3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:39,444 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,36205,-1 for getting cluster id 2024-11-19T18:29:39,444 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:29:39,446 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a384b180-7b21-4843-a993-55743a0d5a26' 2024-11-19T18:29:39,446 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:29:39,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a384b180-7b21-4843-a993-55743a0d5a26" 2024-11-19T18:29:39,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39f18556, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:39,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,36205,-1] 2024-11-19T18:29:39,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:29:39,448 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,450 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48372, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:29:39,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@668848ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:39,451 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:29:39,453 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,42223,1732040978406, seqNum=-1] 2024-11-19T18:29:39,453 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:29:39,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:29:39,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,36205,1732040978332 2024-11-19T18:29:39,462 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:39,468 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:29:39,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:29:39,468 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:29:39,469 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:39,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,469 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T18:29:39,469 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:29:39,469 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=215330652, stopped=false 2024-11-19T18:29:39,469 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,36205,1732040978332 2024-11-19T18:29:39,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:39,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:39,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:39,472 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:29:39,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:39,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
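The call stack above shows where the shutdown is initiated: the test method TestLogRolling.testLogRollOnDatanodeDeath completes, AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, and the utility closes the shared async connection before stopping the cluster. A minimal JUnit 4 sketch of that lifecycle, assuming HBaseTestingUtil's startMiniCluster/shutdownMiniCluster pair and the StartMiniClusterOption builder whose values appear later in this log; the test class itself is illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Same shape as the option logged below: 1 master, 1 region server, 2 datanodes.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option);
      }

      @Test
      public void doesSomethingAgainstTheCluster() throws Exception {
        // test body would exercise the cluster here
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors AbstractTestLogRolling.tearDown in the stack trace above.
        util.shutdownMiniCluster();
      }
    }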
2024-11-19T18:29:39,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:39,473 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:39,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:39,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,474 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,42223,1732040978406' ***** 2024-11-19T18:29:39,474 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:29:39,474 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:29:39,474 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,42223,1732040978406 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:42223. 2024-11-19T18:29:39,475 DEBUG [RS:0;30db5f576be8:42223 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:29:39,475 DEBUG [RS:0;30db5f576be8:42223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
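The ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/running") rely on a standard ZooKeeper behaviour: an exists() call can register a watch on an absent path, so the caller is notified when that node is later created or deleted. A minimal sketch against the plain ZooKeeper client API; the quorum address is the one shown in the log, the timeout and sleep are illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55695", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // exists() registers the watch whether or not /hbase/running is currently present.
        Stat stat = zk.exists("/hbase/running", event ->
            System.out.println("event on /hbase/running: " + event.getType()));
        System.out.println(stat == null ? "znode absent, watch set" : "znode present, watch set");

        Thread.sleep(10_000);  // keep the session open long enough to observe a create/delete event
        zk.close();
      }
    }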
2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T18:29:39,475 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:29:39,476 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T18:29:39,476 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T18:29:39,476 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:29:39,476 DEBUG [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T18:29:39,476 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:29:39,476 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:29:39,476 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:29:39,476 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:29:39,476 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T18:29:39,496 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/.tmp/ns/e5fea1636b1f43b2b7ec1eb5e616dd21 is 43, key is default/ns:d/1732040979348/Put/seqid=0 2024-11-19T18:29:39,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741835_1011 (size=5153) 2024-11-19T18:29:39,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741835_1011 (size=5153) 2024-11-19T18:29:39,505 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/.tmp/ns/e5fea1636b1f43b2b7ec1eb5e616dd21 2024-11-19T18:29:39,516 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/.tmp/ns/e5fea1636b1f43b2b7ec1eb5e616dd21 as hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/ns/e5fea1636b1f43b2b7ec1eb5e616dd21 2024-11-19T18:29:39,525 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/ns/e5fea1636b1f43b2b7ec1eb5e616dd21, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T18:29:39,526 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false 2024-11-19T18:29:39,526 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T18:29:39,533 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T18:29:39,534 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:29:39,534 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:39,534 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732040979476Running coprocessor pre-close hooks at 1732040979476Disabling compacts and flushes for region at 1732040979476Disabling writes for close at 1732040979476Obtaining lock to block concurrent updates at 1732040979476Preparing flush snapshotting stores in 1588230740 at 1732040979476Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732040979477 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732040979478 (+1 ms)Flushing 1588230740/ns: creating writer at 1732040979478Flushing 1588230740/ns: appending metadata at 1732040979496 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732040979496Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15935805: reopening flushed file at 1732040979515 (+19 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false at 1732040979526 (+11 ms)Writing region close event to WAL at 1732040979529 (+3 ms)Running coprocessor post-close hooks at 1732040979534 (+5 ms)Closed at 1732040979534 2024-11-19T18:29:39,535 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:39,676 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,42223,1732040978406; all regions closed. 
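The close path above flushes the remaining 74 B of the hbase:meta memstore to an HFile before the region closes. That flush happens automatically on close, but the same operation can be requested explicitly through the client Admin API, which tests sometimes use to make data durable at a known point. A minimal sketch, assuming a reachable cluster and the standard Admin#flush call:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExplicitFlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Request the same memstore flush of hbase:meta that the close path performs above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }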
2024-11-19T18:29:39,677 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,677 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,677 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,677 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,677 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741834_1010 (size=1152) 2024-11-19T18:29:39,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741834_1010 (size=1152) 2024-11-19T18:29:39,683 DEBUG [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/oldWALs 2024-11-19T18:29:39,683 INFO [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C42223%2C1732040978406.meta:.meta(num 1732040979287) 2024-11-19T18:29:39,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,683 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,684 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,684 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741833_1009 (size=93) 2024-11-19T18:29:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741833_1009 (size=93) 2024-11-19T18:29:39,688 DEBUG [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/oldWALs 2024-11-19T18:29:39,688 INFO [RS:0;30db5f576be8:42223 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C42223%2C1732040978406:(num 1732040978954) 2024-11-19T18:29:39,688 DEBUG [RS:0;30db5f576be8:42223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:39,688 INFO [RS:0;30db5f576be8:42223 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:29:39,688 INFO [RS:0;30db5f576be8:42223 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:29:39,689 INFO [RS:0;30db5f576be8:42223 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T18:29:39,689 INFO [RS:0;30db5f576be8:42223 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:29:39,689 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
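After the regions close, both WAL files are archived to the oldWALs directory. Since this test exercises WAL rolling, it may help to note that a roll can also be requested on demand per region server; a minimal sketch, assuming Admin#rollWALWriter and the ClusterMetrics lookup are available as in recent client APIs:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Ask each live region server to roll its current WAL; the replaced file
          // is eventually archived to oldWALs, as the log above shows.
          for (ServerName server : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            admin.rollWALWriter(server);
          }
        }
      }
    }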
2024-11-19T18:29:39,689 INFO [RS:0;30db5f576be8:42223 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42223 2024-11-19T18:29:39,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,42223,1732040978406 2024-11-19T18:29:39,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:29:39,691 INFO [RS:0;30db5f576be8:42223 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:29:39,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,42223,1732040978406] 2024-11-19T18:29:39,696 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,42223,1732040978406 already deleted, retry=false 2024-11-19T18:29:39,696 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,42223,1732040978406 expired; onlineServers=0 2024-11-19T18:29:39,696 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,36205,1732040978332' ***** 2024-11-19T18:29:39,696 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:29:39,696 INFO [M:0;30db5f576be8:36205 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:29:39,696 INFO [M:0;30db5f576be8:36205 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:29:39,696 DEBUG [M:0;30db5f576be8:36205 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:29:39,696 DEBUG [M:0;30db5f576be8:36205 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:29:39,696 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T18:29:39,696 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040978656 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040978656,5,FailOnTimeoutGroup] 2024-11-19T18:29:39,696 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040978656 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040978656,5,FailOnTimeoutGroup] 2024-11-19T18:29:39,697 INFO [M:0;30db5f576be8:36205 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:29:39,697 INFO [M:0;30db5f576be8:36205 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:29:39,697 DEBUG [M:0;30db5f576be8:36205 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:29:39,697 INFO [M:0;30db5f576be8:36205 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:29:39,697 INFO [M:0;30db5f576be8:36205 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:29:39,697 INFO [M:0;30db5f576be8:36205 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:29:39,697 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:29:39,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:29:39,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:39,698 DEBUG [M:0;30db5f576be8:36205 {}] zookeeper.ZKUtil(347): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:29:39,698 WARN [M:0;30db5f576be8:36205 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:29:39,699 INFO [M:0;30db5f576be8:36205 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/.lastflushedseqids 2024-11-19T18:29:39,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741836_1012 (size=99) 2024-11-19T18:29:39,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741836_1012 (size=99) 2024-11-19T18:29:39,708 INFO [M:0;30db5f576be8:36205 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:29:39,709 INFO [M:0;30db5f576be8:36205 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:29:39,709 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:29:39,709 INFO [M:0;30db5f576be8:36205 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:39,709 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:39,709 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:29:39,709 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:39,709 INFO [M:0;30db5f576be8:36205 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T18:29:39,729 DEBUG [M:0;30db5f576be8:36205 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f60180f2f4f4464fa4178d9a7ed8f7c6 is 82, key is hbase:meta,,1/info:regioninfo/1732040979324/Put/seqid=0 2024-11-19T18:29:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741837_1013 (size=5672) 2024-11-19T18:29:39,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741837_1013 (size=5672) 2024-11-19T18:29:39,736 INFO [M:0;30db5f576be8:36205 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f60180f2f4f4464fa4178d9a7ed8f7c6 2024-11-19T18:29:39,758 DEBUG [M:0;30db5f576be8:36205 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1f6725c165b46bb9454a036a382c309 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732040979354/Put/seqid=0 2024-11-19T18:29:39,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741838_1014 (size=5275) 2024-11-19T18:29:39,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741838_1014 (size=5275) 2024-11-19T18:29:39,765 INFO [M:0;30db5f576be8:36205 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1f6725c165b46bb9454a036a382c309 2024-11-19T18:29:39,788 DEBUG [M:0;30db5f576be8:36205 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e416c1f8a549cbbfdb2c3572a698c0 is 69, key is 30db5f576be8,42223,1732040978406/rs:state/1732040978776/Put/seqid=0 2024-11-19T18:29:39,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-11-19T18:29:39,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42223-0x101317dda180001, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:39,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741839_1015 (size=5156) 2024-11-19T18:29:39,793 INFO [RS:0;30db5f576be8:42223 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:29:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741839_1015 (size=5156) 2024-11-19T18:29:39,794 INFO [RS:0;30db5f576be8:42223 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,42223,1732040978406; zookeeper connection closed. 2024-11-19T18:29:39,794 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 2024-11-19T18:29:39,794 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:29:39,794 INFO [M:0;30db5f576be8:36205 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e416c1f8a549cbbfdb2c3572a698c0 2024-11-19T18:29:39,816 DEBUG [M:0;30db5f576be8:36205 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21fc7271e90e407e8cce416aa32a5a4e is 52, key is load_balancer_on/state:d/1732040979466/Put/seqid=0 2024-11-19T18:29:39,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741840_1016 (size=5056) 2024-11-19T18:29:39,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741840_1016 (size=5056) 2024-11-19T18:29:39,822 INFO [M:0;30db5f576be8:36205 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21fc7271e90e407e8cce416aa32a5a4e 2024-11-19T18:29:39,831 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f60180f2f4f4464fa4178d9a7ed8f7c6 as hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f60180f2f4f4464fa4178d9a7ed8f7c6 2024-11-19T18:29:39,838 INFO [M:0;30db5f576be8:36205 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f60180f2f4f4464fa4178d9a7ed8f7c6, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T18:29:39,839 DEBUG [M:0;30db5f576be8:36205 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1f6725c165b46bb9454a036a382c309 as hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1f6725c165b46bb9454a036a382c309 2024-11-19T18:29:39,846 INFO [M:0;30db5f576be8:36205 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1f6725c165b46bb9454a036a382c309, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T18:29:39,848 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e416c1f8a549cbbfdb2c3572a698c0 as hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/75e416c1f8a549cbbfdb2c3572a698c0 2024-11-19T18:29:39,854 INFO [M:0;30db5f576be8:36205 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/75e416c1f8a549cbbfdb2c3572a698c0, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T18:29:39,856 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21fc7271e90e407e8cce416aa32a5a4e as hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21fc7271e90e407e8cce416aa32a5a4e 2024-11-19T18:29:39,863 INFO [M:0;30db5f576be8:36205 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/d21a1d00-531c-5dbd-abae-14d3c621bfef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21fc7271e90e407e8cce416aa32a5a4e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T18:29:39,864 INFO [M:0;30db5f576be8:36205 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=29, compaction requested=false 2024-11-19T18:29:39,866 INFO [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:39,866 DEBUG [M:0;30db5f576be8:36205 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732040979709Disabling compacts and flushes for region at 1732040979709Disabling writes for close at 1732040979709Obtaining lock to block concurrent updates at 1732040979709Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732040979709Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732040979710 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732040979711 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732040979711Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732040979728 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732040979728Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732040979742 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732040979758 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732040979758Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732040979771 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732040979787 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732040979787Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732040979800 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732040979816 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732040979816Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b750934: reopening flushed file at 1732040979829 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61f358e3: reopening flushed file at 1732040979838 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b6479c4: reopening flushed file at 1732040979847 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9cedc07: reopening flushed file at 1732040979855 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=29, compaction requested=false at 1732040979864 (+9 ms)Writing region close event to WAL at 1732040979866 (+2 ms)Closed at 1732040979866 2024-11-19T18:29:39,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,867 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,868 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40973 is added to blk_1073741830_1006 (size=10311) 2024-11-19T18:29:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39567 is added to blk_1073741830_1006 (size=10311) 2024-11-19T18:29:39,871 INFO [M:0;30db5f576be8:36205 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T18:29:39,871 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:29:39,872 INFO [M:0;30db5f576be8:36205 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36205 2024-11-19T18:29:39,872 INFO [M:0;30db5f576be8:36205 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:29:39,975 INFO [M:0;30db5f576be8:36205 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:29:39,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:39,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36205-0x101317dda180000, quorum=127.0.0.1:55695, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:29:39,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c89f457{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:39,980 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18f27499{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:39,980 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:39,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cc26d13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:39,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@81550dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:39,982 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:39,982 WARN [BP-1966740921-172.17.0.2-1732040977466 heartbeating to localhost/127.0.0.1:40287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:39,982 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:39,982 WARN [BP-1966740921-172.17.0.2-1732040977466 heartbeating to localhost/127.0.0.1:40287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1966740921-172.17.0.2-1732040977466 (Datanode Uuid de27e5fc-c4d8-440f-b49a-951e7d553d45) service to localhost/127.0.0.1:40287 2024-11-19T18:29:39,983 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data3/current/BP-1966740921-172.17.0.2-1732040977466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:39,983 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data4/current/BP-1966740921-172.17.0.2-1732040977466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:39,983 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:39,985 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65506a11{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:39,986 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@670e4080{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:39,986 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:39,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43fff29e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:39,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a0544f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:39,987 WARN [BP-1966740921-172.17.0.2-1732040977466 heartbeating to localhost/127.0.0.1:40287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:39,987 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:39,987 WARN [BP-1966740921-172.17.0.2-1732040977466 heartbeating to localhost/127.0.0.1:40287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1966740921-172.17.0.2-1732040977466 (Datanode Uuid 84c4361a-e4e5-4575-93d9-fac89bfe7245) service to localhost/127.0.0.1:40287 2024-11-19T18:29:39,987 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:39,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data1/current/BP-1966740921-172.17.0.2-1732040977466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:39,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/cluster_f0ce6721-b7fb-85f7-49a0-8a95e3f2d7ec/data/data2/current/BP-1966740921-172.17.0.2-1732040977466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:39,988 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:39,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@788684ca{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:29:39,995 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ae773f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:39,995 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:39,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@534816f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:39,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a78e1c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:40,002 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:29:40,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:29:40,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:29:40,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.log.dir so I do NOT create it in target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f 2024-11-19T18:29:40,023 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/acd26ea7-c5a5-2c36-a63c-241a5b0caf5f/hadoop.tmp.dir so I do NOT create it in target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f 2024-11-19T18:29:40,023 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424, deleteOnExit=true 2024-11-19T18:29:40,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/test.cache.data in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:29:40,024 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:29:40,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:29:40,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:29:40,040 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:29:40,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:40,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:40,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:40,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:40,155 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:29:40,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:40,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8247472{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:40,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46889226{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:40,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e21db3f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-46011-hadoop-hdfs-3_4_1-tests_jar-_-any-11885595789297420319/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:29:40,275 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17c48ca{HTTP/1.1, (http/1.1)}{localhost:46011} 2024-11-19T18:29:40,275 INFO [Time-limited test {}] server.Server(415): Started @103657ms 2024-11-19T18:29:40,289 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:29:40,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:40,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:40,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:40,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:40,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:29:40,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a514f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:40,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a6958e2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:40,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@307e6985{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-33249-hadoop-hdfs-3_4_1-tests_jar-_-any-8445489596765788975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:40,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c3d2a60{HTTP/1.1, (http/1.1)}{localhost:33249} 2024-11-19T18:29:40,487 INFO [Time-limited test {}] server.Server(415): Started @103870ms 2024-11-19T18:29:40,490 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:29:40,534 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:40,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:40,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:40,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:40,542 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:29:40,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c461833{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:40,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2295376c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:40,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5450787c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-39149-hadoop-hdfs-3_4_1-tests_jar-_-any-18056509543280410617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:40,708 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@198c3788{HTTP/1.1, (http/1.1)}{localhost:39149} 2024-11-19T18:29:40,708 INFO [Time-limited test {}] server.Server(415): Started @104090ms 2024-11-19T18:29:40,710 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:29:40,762 WARN [Thread-662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data1/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:40,762 WARN [Thread-663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data2/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:40,801 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:29:40,851 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:29:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d67a384e58a3d4e with lease ID 0x2f94b0dd432c328b: Processing first storage report for DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82 from datanode DatanodeRegistration(127.0.0.1:35317, datanodeUuid=6b0e1fe9-d13d-4b20-928d-4570c7bfb307, infoPort=44403, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d67a384e58a3d4e with lease ID 0x2f94b0dd432c328b: from storage DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82 node DatanodeRegistration(127.0.0.1:35317, datanodeUuid=6b0e1fe9-d13d-4b20-928d-4570c7bfb307, infoPort=44403, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d67a384e58a3d4e with lease ID 0x2f94b0dd432c328b: Processing first storage report for DS-fae43b15-4a29-47e4-a907-b005da823145 from datanode DatanodeRegistration(127.0.0.1:35317, datanodeUuid=6b0e1fe9-d13d-4b20-928d-4570c7bfb307, infoPort=44403, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d67a384e58a3d4e with lease ID 0x2f94b0dd432c328b: from storage DS-fae43b15-4a29-47e4-a907-b005da823145 node DatanodeRegistration(127.0.0.1:35317, datanodeUuid=6b0e1fe9-d13d-4b20-928d-4570c7bfb307, infoPort=44403, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:40,879 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data3/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:40,880 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data4/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:40,925 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:29:40,929 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d04b7f24b85034c with lease ID 0x2f94b0dd432c328c: Processing first storage report for DS-bb6bd24d-f684-4186-ac36-152d27a55a13 from datanode DatanodeRegistration(127.0.0.1:44689, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=42465, infoSecurePort=0, ipcPort=35655, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:40,930 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d04b7f24b85034c with lease ID 0x2f94b0dd432c328c: from storage DS-bb6bd24d-f684-4186-ac36-152d27a55a13 node DatanodeRegistration(127.0.0.1:44689, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=42465, infoSecurePort=0, ipcPort=35655, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:40,930 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d04b7f24b85034c with lease ID 0x2f94b0dd432c328c: Processing first storage report for DS-6ca4ee45-d0a7-4be1-b3c7-3c8bf3fe00c8 from datanode DatanodeRegistration(127.0.0.1:44689, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=42465, infoSecurePort=0, ipcPort=35655, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:40,930 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d04b7f24b85034c with lease ID 0x2f94b0dd432c328c: from storage DS-6ca4ee45-d0a7-4be1-b3c7-3c8bf3fe00c8 node DatanodeRegistration(127.0.0.1:44689, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=42465, infoSecurePort=0, ipcPort=35655, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:41,006 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f 2024-11-19T18:29:41,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/zookeeper_0, clientPort=60839, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:29:41,015 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60839 2024-11-19T18:29:41,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,018 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:29:41,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:29:41,036 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39 with version=8 2024-11-19T18:29:41,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:29:41,038 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:29:41,038 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:29:41,039 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:29:41,039 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46573 2024-11-19T18:29:41,041 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46573 connecting to ZooKeeper ensemble=127.0.0.1:60839 2024-11-19T18:29:41,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465730x0, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:29:41,048 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46573-0x101317de4a00000 connected 2024-11-19T18:29:41,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,075 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:41,076 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39, hbase.cluster.distributed=false 2024-11-19T18:29:41,078 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:29:41,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-19T18:29:41,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46573 2024-11-19T18:29:41,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46573 2024-11-19T18:29:41,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-19T18:29:41,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-19T18:29:41,108 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:29:41,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:29:41,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34263 2024-11-19T18:29:41,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34263 connecting to ZooKeeper ensemble=127.0.0.1:60839 2024-11-19T18:29:41,111 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342630x0, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:29:41,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34263-0x101317de4a00001 connected 2024-11-19T18:29:41,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:29:41,120 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:29:41,127 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:29:41,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:29:41,132 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:29:41,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34263 2024-11-19T18:29:41,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34263 2024-11-19T18:29:41,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34263 2024-11-19T18:29:41,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34263 2024-11-19T18:29:41,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34263 2024-11-19T18:29:41,155 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:46573 2024-11-19T18:29:41,155 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:41,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:41,159 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:29:41,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,161 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:29:41,162 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,46573,1732040981038 from backup master directory 2024-11-19T18:29:41,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:41,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:29:41,165 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
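The entries above show the master registering itself under /hbase/backup-masters and then claiming /hbase/master on the mini ZooKeeper quorum at 127.0.0.1:60839. As a rough illustration only (not part of the test itself), a plain ZooKeeper client could inspect those znodes like this; the quorum address is specific to this test run and the znode paths are simply the ones printed in the log.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class InspectHBaseZNodes {
    public static void main(String[] args) throws Exception {
        // Quorum address and the /hbase base znode come from this test run's log output.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60839", 30000, event -> { });
        try {
            // /hbase/master is created once a master wins the active-master election.
            Stat master = zk.exists("/hbase/master", false);
            System.out.println("/hbase/master exists: " + (master != null));

            // Standby masters park themselves under /hbase/backup-masters until promoted.
            List<String> backups = zk.getChildren("/hbase/backup-masters", false);
            System.out.println("backup masters: " + backups);
        } finally {
            zk.close();
        }
    }
}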
2024-11-19T18:29:41,166 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,171 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/hbase.id] with ID: 8c9e9f99-b6a4-42de-a59e-347186a6059f 2024-11-19T18:29:41,171 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/.tmp/hbase.id 2024-11-19T18:29:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:29:41,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:29:41,188 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/.tmp/hbase.id]:[hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/hbase.id] 2024-11-19T18:29:41,205 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:41,205 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:29:41,207 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
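At this point the master has written the cluster ID file (hbase.id) under the root directory. Purely as a sketch, and assuming a client can reach this mini cluster's ZooKeeper at 127.0.0.1:60839, the same cluster ID is visible through the public Admin API rather than by reading the file off HDFS:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShowClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port values are taken from this particular test run's log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "60839");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Reports the same ID that was just persisted to hbase.id by the master.
            System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
    }
}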
2024-11-19T18:29:41,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:29:41,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:29:41,223 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:29:41,225 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:29:41,225 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:41,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:29:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:29:41,245 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store 2024-11-19T18:29:41,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:29:41,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:29:41,257 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:29:41,257 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
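The two entries above print the full descriptor of the internal master:store region, including its four column families (info, proc, rs, state) and their attributes. For orientation only, here is a hedged sketch of how a user table with a comparable 'info' family could be expressed through the public builder API; the table name is hypothetical, and only attribute values shown in the log are reused.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptor {
    public static TableDescriptor build() {
        // Attribute values mirror the 'info' family printed in the log:
        // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
        // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8 KB.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_store"))   // hypothetical table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
    }
}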
2024-11-19T18:29:41,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732040981257Disabling compacts and flushes for region at 1732040981257Disabling writes for close at 1732040981257Writing region close event to WAL at 1732040981257Closed at 1732040981257 2024-11-19T18:29:41,258 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/.initializing 2024-11-19T18:29:41,259 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,264 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C46573%2C1732040981038, suffix=, logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038, archiveDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/oldWALs, maxLogs=10 2024-11-19T18:29:41,264 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C46573%2C1732040981038.1732040981264 2024-11-19T18:29:41,279 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 2024-11-19T18:29:41,284 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42465:42465),(127.0.0.1/127.0.0.1:44403:44403)] 2024-11-19T18:29:41,288 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:29:41,288 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:41,288 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,288 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:29:41,294 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:41,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:29:41,297 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:41,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,299 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:29:41,299 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:41,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:29:41,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:41,303 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,303 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,304 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,306 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,306 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,307 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:29:41,308 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:29:41,313 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:29:41,314 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707720, jitterRate=-0.10008776187896729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:29:41,315 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732040981289Initializing all the Stores at 1732040981290 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040981290Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040981292 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040981292Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040981292Cleaning up temporary data from old regions at 1732040981306 (+14 ms)Region opened successfully at 1732040981315 (+9 ms) 2024-11-19T18:29:41,316 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:29:41,322 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a8696bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:29:41,323 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:29:41,323 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:29:41,323 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:29:41,324 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:29:41,324 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:29:41,325 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:29:41,325 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:29:41,331 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:29:41,332 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:29:41,334 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:29:41,334 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:29:41,335 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:29:41,337 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:29:41,337 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:29:41,338 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:29:41,341 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:29:41,343 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:29:41,344 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:29:41,347 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:29:41,349 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:29:41,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:41,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:29:41,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,351 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,46573,1732040981038, sessionid=0x101317de4a00000, setting cluster-up flag (Was=false) 2024-11-19T18:29:41,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,364 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:29:41,366 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,377 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:29:41,379 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,46573,1732040981038 2024-11-19T18:29:41,381 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:29:41,384 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:41,385 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:29:41,385 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:29:41,385 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,46573,1732040981038 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:29:41,389 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:41,389 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:41,389 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:41,389 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:29:41,390 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:29:41,390 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,390 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:29:41,390 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:29:41,401 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:41,401 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:29:41,401 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041011401 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:29:41,402 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:29:41,406 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,407 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:29:41,416 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,427 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:29:41,427 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:29:41,427 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:29:41,428 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:29:41,428 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:29:41,428 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040981428,5,FailOnTimeoutGroup] 2024-11-19T18:29:41,429 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040981428,5,FailOnTimeoutGroup] 2024-11-19T18:29:41,429 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,429 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:29:41,430 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,430 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
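The HMaster(1741) record above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch, assuming only the stock HBaseConfiguration/Configuration API, of supplying that threshold; the value 3 is an arbitrary illustration, not something taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountThresholdSketch {
      public static void main(String[] args) {
        // Start from the usual hbase-default.xml/hbase-site.xml stack.
        Configuration conf = HBaseConfiguration.create();
        // Per the log message, any value > 0 enables the recovery behaviour; 3 is illustrative only.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }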
2024-11-19T18:29:41,447 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(746): ClusterId : 8c9e9f99-b6a4-42de-a59e-347186a6059f 2024-11-19T18:29:41,447 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:29:41,451 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:29:41,451 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:29:41,454 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:29:41,457 DEBUG [RS:0;30db5f576be8:34263 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e5cb49c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:29:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:29:41,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:29:41,466 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:29:41,466 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39 2024-11-19T18:29:41,488 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:29:41,490 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:34263 2024-11-19T18:29:41,490 INFO [RS:0;30db5f576be8:34263 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:29:41,490 INFO [RS:0;30db5f576be8:34263 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:29:41,490 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T18:29:41,492 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,46573,1732040981038 with port=34263, startcode=1732040981107 2024-11-19T18:29:41,492 DEBUG [RS:0;30db5f576be8:34263 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:29:41,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:29:41,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:41,501 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36531, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:29:41,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:29:41,505 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39 2024-11-19T18:29:41,505 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44171 2024-11-19T18:29:41,505 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:29:41,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:29:41,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:41,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:29:41,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:29:41,509 DEBUG [RS:0;30db5f576be8:34263 {}] zookeeper.ZKUtil(111): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,509 WARN [RS:0;30db5f576be8:34263 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:29:41,509 INFO [RS:0;30db5f576be8:34263 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:41,510 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,513 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,34263,1732040981107] 2024-11-19T18:29:41,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:29:41,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:41,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:29:41,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:29:41,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:41,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:29:41,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:29:41,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:41,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:41,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:29:41,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740 2024-11-19T18:29:41,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740 2024-11-19T18:29:41,522 INFO 
[RS:0;30db5f576be8:34263 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:29:41,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:29:41,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:29:41,524 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:29:41,525 INFO [RS:0;30db5f576be8:34263 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:29:41,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:29:41,527 INFO [RS:0;30db5f576be8:34263 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:29:41,527 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,527 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:29:41,530 INFO [RS:0;30db5f576be8:34263 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:29:41,530 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,530 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,530 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:41,531 DEBUG [RS:0;30db5f576be8:34263 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:41,546 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:29:41,547 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868360, jitterRate=0.10417692363262177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:29:41,548 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,548 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,549 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,549 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,549 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
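The FlushLargeStoresPolicy(65) records above (32.0 M for master:store, 16.0 M for hbase:meta) describe the fallback taken when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the region's memstore flush size divided by its number of column families. A small arithmetic sketch using the master:store numbers this run actually logged (flushSize=134217728 and the four families info, proc, rs, state):

    public class PerFamilyFlushLowerBoundSketch {
      public static void main(String[] args) {
        long memStoreFlushSize = 134_217_728L; // flushSize from the MasterRegionFlusherAndCompactor record
        int columnFamilies = 4;                // info, proc, rs, state
        long perFamilyLowerBound = memStoreFlushSize / columnFamilies;
        // Prints 33554432 (32 MB), matching flushSizeLowerBound in the "Opened 1595e783..." record.
        System.out.println(perFamilyLowerBound);
      }
    }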
2024-11-19T18:29:41,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732040981494Initializing all the Stores at 1732040981496 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040981496Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040981503 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040981503Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040981503Cleaning up temporary data from old regions at 1732040981524 (+21 ms)Region opened successfully at 1732040981549 (+25 ms) 2024-11-19T18:29:41,549 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,34263,1732040981107-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
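Each region open journal, like the one for 1588230740 just above, records every phase with an epoch-millisecond timestamp and a running "(+N ms)" delta; each delta is simply the difference from the previous phase's timestamp. A quick check with two values copied from that journal:

    public class OpenJournalDeltaCheck {
      public static void main(String[] args) {
        long cleanedUpAt = 1_732_040_981_524L; // "Cleaning up temporary data from old regions at ..."
        long openedAt    = 1_732_040_981_549L; // "Region opened successfully at ..."
        System.out.println(openedAt - cleanedUpAt); // prints 25, matching "(+25 ms)"
      }
    }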
2024-11-19T18:29:41,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:29:41,549 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:29:41,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:29:41,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:29:41,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:29:41,553 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:29:41,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732040981549Disabling compacts and flushes for region at 1732040981549Disabling writes for close at 1732040981549Writing region close event to WAL at 1732040981553 (+4 ms)Closed at 1732040981553 2024-11-19T18:29:41,556 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:41,556 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:29:41,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:29:41,558 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:29:41,560 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:29:41,575 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:29:41,575 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,34263,1732040981107-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,575 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:41,576 INFO [RS:0;30db5f576be8:34263 {}] regionserver.Replication(171): 30db5f576be8,34263,1732040981107 started 2024-11-19T18:29:41,600 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
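The MemStoreFlusher(131) record earlier (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with a low-water mark at 95% of the global limit; in stock HBase that fraction is controlled by hbase.regionserver.global.memstore.size.lower.limit, whose documented default is 0.95 (stated from general HBase knowledge, not from this log). A one-line check, kept in integer arithmetic to avoid rounding noise:

    public class MemStoreLowMarkCheck {
      public static void main(String[] args) {
        long globalLimitMb = 880L;                  // globalMemStoreLimit from the log
        long lowMarkMb = globalLimitMb * 95 / 100;  // assumed 0.95 lower-limit fraction
        System.out.println(lowMarkMb);              // prints 836, matching globalMemStoreLimitLowMark
      }
    }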
2024-11-19T18:29:41,600 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,34263,1732040981107, RpcServer on 30db5f576be8/172.17.0.2:34263, sessionid=0x101317de4a00001 2024-11-19T18:29:41,601 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:29:41,601 DEBUG [RS:0;30db5f576be8:34263 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,601 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,34263,1732040981107' 2024-11-19T18:29:41,601 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:29:41,602 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:29:41,603 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:29:41,603 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:29:41,604 DEBUG [RS:0;30db5f576be8:34263 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,604 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,34263,1732040981107' 2024-11-19T18:29:41,604 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:29:41,604 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:29:41,605 DEBUG [RS:0;30db5f576be8:34263 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:29:41,605 INFO [RS:0;30db5f576be8:34263 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:29:41,605 INFO [RS:0;30db5f576be8:34263 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:29:41,708 INFO [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C34263%2C1732040981107, suffix=, logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107, archiveDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs, maxLogs=32 2024-11-19T18:29:41,710 INFO [RS:0;30db5f576be8:34263 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732040981710 2024-11-19T18:29:41,711 WARN [30db5f576be8:46573 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
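The AbstractFSWAL(613) record above reports rollsize=128 MB against blocksize=256 MB, i.e. the WAL rolls at half its block size; in stock HBase that ratio is hbase.regionserver.logroll.multiplier, which defaults to 0.5 (an assumption from general HBase knowledge, not printed in this log). A sketch of the relationship:

    public class WalRollSizeSketch {
      public static void main(String[] args) {
        long walBlockSizeBytes = 256L * 1024 * 1024;  // blocksize=256 MB from the log
        double logRollMultiplier = 0.5;               // assumed default of hbase.regionserver.logroll.multiplier
        long rollSizeBytes = (long) (walBlockSizeBytes * logRollMultiplier);
        System.out.println(rollSizeBytes);            // 134217728 bytes == the 128 MB rollsize in the log
      }
    }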
2024-11-19T18:29:41,730 INFO [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 2024-11-19T18:29:41,742 DEBUG [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42465:42465),(127.0.0.1/127.0.0.1:44403:44403)] 2024-11-19T18:29:41,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:29:41,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:29:41,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T18:29:41,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:41,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:41,961 DEBUG [30db5f576be8:46573 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:29:41,962 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,34263,1732040981107 2024-11-19T18:29:41,964 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,34263,1732040981107, state=OPENING 2024-11-19T18:29:41,966 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:29:41,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:29:41,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:41,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:41,970 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:29:41,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,34263,1732040981107}] 2024-11-19T18:29:42,125 DEBUG 
[RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:29:42,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37771, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:29:42,134 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:29:42,134 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:42,137 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C34263%2C1732040981107.meta, suffix=.meta, logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107, archiveDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs, maxLogs=32 2024-11-19T18:29:42,139 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta 2024-11-19T18:29:42,150 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta 2024-11-19T18:29:42,165 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42465:42465),(127.0.0.1/127.0.0.1:44403:44403)] 2024-11-19T18:29:42,176 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:29:42,177 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
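The coprocessor$1 attribute carried by the hbase:meta descriptor ('|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|') encodes the class name plus its load priority, and 536870911 is Integer.MAX_VALUE / 4, which matches HBase's system coprocessor priority constant. A minimal sketch, for illustration only, of attaching the same endpoint to a hypothetical user table ('demo') through the public TableDescriptorBuilder API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws Exception {
        System.out.println(Integer.MAX_VALUE / 4); // prints 536870911, the priority recorded above

        // Attach the endpoint to an illustrative table descriptor.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td.getCoprocessorDescriptors());
      }
    }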
2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:29:42,177 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:29:42,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:29:42,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:29:42,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:42,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:42,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:29:42,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:29:42,187 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:42,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:42,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:29:42,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:29:42,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:42,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:29:42,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:29:42,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:29:42,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:42,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
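The column-family attributes printed for the 'info' family of hbase:meta (VERSIONS '3', BLOOMFILTER 'ROWCOL', IN_MEMORY 'true', DATA_BLOCK_ENCODING 'ROW_INDEX_V1', BLOCKSIZE 8192) correspond one-to-one to setters on the client-side ColumnFamilyDescriptorBuilder. A sketch, purely illustrative, of declaring an equivalent family for a user table:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeInfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .build();
        System.out.println(info);
      }
    }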
2024-11-19T18:29:42,193 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:29:42,194 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740 2024-11-19T18:29:42,195 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740 2024-11-19T18:29:42,197 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:29:42,197 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:29:42,197 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:29:42,199 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:29:42,200 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871704, jitterRate=0.1084294319152832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:29:42,200 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:29:42,202 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732040982178Writing region info on filesystem at 1732040982178Initializing all the Stores at 1732040982179 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040982179Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040982180 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040982180Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732040982180Cleaning up temporary data from old regions at 1732040982197 (+17 ms)Running coprocessor post-open hooks at 1732040982201 (+4 ms)Region opened successfully at 1732040982202 (+1 ms) 2024-11-19T18:29:42,203 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732040982125 2024-11-19T18:29:42,208 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:29:42,208 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:29:42,208 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,34263,1732040981107 2024-11-19T18:29:42,245 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,34263,1732040981107, state=OPEN 2024-11-19T18:29:42,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:29:42,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:29:42,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:42,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:29:42,257 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,34263,1732040981107 2024-11-19T18:29:42,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:29:42,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,34263,1732040981107 in 286 msec 2024-11-19T18:29:42,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:29:42,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 708 msec 2024-11-19T18:29:42,278 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:29:42,279 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:29:42,280 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:29:42,280 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,34263,1732040981107, seqNum=-1] 2024-11-19T18:29:42,281 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:29:42,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50497, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:29:42,291 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 907 msec 2024-11-19T18:29:42,292 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732040982292, completionTime=-1 2024-11-19T18:29:42,292 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:29:42,292 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:29:42,294 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:29:42,294 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041042294 2024-11-19T18:29:42,294 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041102294 2024-11-19T18:29:42,294 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-19T18:29:42,295 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,295 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,295 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,295 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:46573, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:29:42,295 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,297 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:29:42,300 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.138sec 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:29:42,305 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:29:42,316 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:29:42,316 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:29:42,316 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,46573,1732040981038-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:29:42,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dc8ddff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:42,347 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,46573,-1 for getting cluster id 2024-11-19T18:29:42,347 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:29:42,350 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8c9e9f99-b6a4-42de-a59e-347186a6059f' 2024-11-19T18:29:42,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:29:42,350 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8c9e9f99-b6a4-42de-a59e-347186a6059f" 2024-11-19T18:29:42,351 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@132de1c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:42,351 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,46573,-1] 2024-11-19T18:29:42,351 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:29:42,352 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:29:42,353 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41840, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:29:42,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2efeeef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:29:42,355 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:29:42,360 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,34263,1732040981107, seqNum=-1] 2024-11-19T18:29:42,361 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:29:42,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33972, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:29:42,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,46573,1732040981038 2024-11-19T18:29:42,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:42,377 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:29:42,400 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:29:42,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:42,421 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:29:42,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:42,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:42,421 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:29:42,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:29:42,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:29:42,422 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:29:42,422 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:29:42,430 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45935 2024-11-19T18:29:42,432 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45935 connecting to ZooKeeper ensemble=127.0.0.1:60839 2024-11-19T18:29:42,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:42,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:29:42,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:42,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:42,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:42,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459350x0, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:29:42,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:459350x0, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-19T18:29:42,466 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-19T18:29:42,470 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:29:42,470 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45935-0x101317de4a00002 connected 2024-11-19T18:29:42,478 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:29:42,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:29:42,482 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:29:42,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45935 2024-11-19T18:29:42,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45935 2024-11-19T18:29:42,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45935 2024-11-19T18:29:42,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45935 2024-11-19T18:29:42,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45935 2024-11-19T18:29:42,498 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(746): ClusterId : 8c9e9f99-b6a4-42de-a59e-347186a6059f 2024-11-19T18:29:42,498 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:29:42,502 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:29:42,502 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:29:42,508 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:29:42,510 DEBUG [RS:1;30db5f576be8:45935 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a6385a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:29:42,531 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;30db5f576be8:45935 
2024-11-19T18:29:42,531 INFO [RS:1;30db5f576be8:45935 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:29:42,531 INFO [RS:1;30db5f576be8:45935 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:29:42,531 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T18:29:42,532 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,46573,1732040981038 with port=45935, startcode=1732040982421 2024-11-19T18:29:42,533 DEBUG [RS:1;30db5f576be8:45935 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:29:42,538 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38165, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:29:42,538 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,538 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,540 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39 2024-11-19T18:29:42,540 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44171 2024-11-19T18:29:42,540 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:29:42,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:29:42,543 DEBUG [RS:1;30db5f576be8:45935 {}] zookeeper.ZKUtil(111): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,543 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,45935,1732040982421] 2024-11-19T18:29:42,543 WARN [RS:1;30db5f576be8:45935 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T18:29:42,543 INFO [RS:1;30db5f576be8:45935 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:29:42,543 DEBUG [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,556 INFO [RS:1;30db5f576be8:45935 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:29:42,563 INFO [RS:1;30db5f576be8:45935 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:29:42,569 INFO [RS:1;30db5f576be8:45935 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:29:42,569 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,572 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:29:42,574 INFO [RS:1;30db5f576be8:45935 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:29:42,574 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,574 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,574 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,574 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,574 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,574 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:42,575 DEBUG [RS:1;30db5f576be8:45935 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:29:42,588 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,588 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,588 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,588 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,589 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,589 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,45935,1732040982421-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:29:42,615 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:29:42,615 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,45935,1732040982421-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,615 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:29:42,615 INFO [RS:1;30db5f576be8:45935 {}] regionserver.Replication(171): 30db5f576be8,45935,1732040982421 started 2024-11-19T18:29:42,630 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:29:42,630 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,45935,1732040982421, RpcServer on 30db5f576be8/172.17.0.2:45935, sessionid=0x101317de4a00002 2024-11-19T18:29:42,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;30db5f576be8:45935,5,FailOnTimeoutGroup] 2024-11-19T18:29:42,631 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:29:42,631 DEBUG [RS:1;30db5f576be8:45935 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,631 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,45935,1732040982421' 2024-11-19T18:29:42,631 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:29:42,631 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-19T18:29:42,631 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T18:29:42,632 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:29:42,632 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:29:42,632 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:29:42,633 DEBUG [RS:1;30db5f576be8:45935 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,45935,1732040982421 2024-11-19T18:29:42,633 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,45935,1732040982421' 2024-11-19T18:29:42,633 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:29:42,633 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 30db5f576be8,46573,1732040981038 2024-11-19T18:29:42,633 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63a82de5 2024-11-19T18:29:42,633 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:29:42,634 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T18:29:42,634 DEBUG [RS:1;30db5f576be8:45935 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:29:42,634 INFO [RS:1;30db5f576be8:45935 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:29:42,634 INFO [RS:1;30db5f576be8:45935 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-19T18:29:42,637 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41846, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T18:29:42,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T18:29:42,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-19T18:29:42,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:29:42,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T18:29:42,646 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T18:29:42,646 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:42,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-19T18:29:42,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T18:29:42,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:29:42,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741835_1011 (size=393) 2024-11-19T18:29:42,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741835_1011 (size=393) 2024-11-19T18:29:42,714 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c2b57c628ca9f719f9c427e72d9c8d20, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39 2024-11-19T18:29:42,737 INFO [RS:1;30db5f576be8:45935 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C45935%2C1732040982421, suffix=, logDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421, archiveDir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs, maxLogs=32 2024-11-19T18:29:42,738 INFO [RS:1;30db5f576be8:45935 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C45935%2C1732040982421.1732040982738 2024-11-19T18:29:42,763 INFO [RS:1;30db5f576be8:45935 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 2024-11-19T18:29:42,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35317 is added to blk_1073741836_1012 (size=76) 2024-11-19T18:29:42,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741836_1012 (size=76) 2024-11-19T18:29:42,775 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:42,775 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing c2b57c628ca9f719f9c427e72d9c8d20, disabling compactions & flushes 2024-11-19T18:29:42,775 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:42,775 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:42,775 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. after waiting 0 ms 2024-11-19T18:29:42,775 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:42,775 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 
2024-11-19T18:29:42,776 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for c2b57c628ca9f719f9c427e72d9c8d20: Waiting for close lock at 1732040982775Disabling compacts and flushes for region at 1732040982775Disabling writes for close at 1732040982775Writing region close event to WAL at 1732040982775Closed at 1732040982775 2024-11-19T18:29:42,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T18:29:42,778 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732040982778"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732040982778"}]},"ts":"1732040982778"} 2024-11-19T18:29:42,783 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-19T18:29:42,787 DEBUG [RS:1;30db5f576be8:45935 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42465:42465),(127.0.0.1/127.0.0.1:44403:44403)] 2024-11-19T18:29:42,788 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T18:29:42,789 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732040982788"}]},"ts":"1732040982788"} 2024-11-19T18:29:42,791 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-19T18:29:42,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c2b57c628ca9f719f9c427e72d9c8d20, ASSIGN}] 2024-11-19T18:29:42,793 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c2b57c628ca9f719f9c427e72d9c8d20, ASSIGN 2024-11-19T18:29:42,796 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c2b57c628ca9f719f9c427e72d9c8d20, ASSIGN; state=OFFLINE, location=30db5f576be8,34263,1732040981107; forceNewPlan=false, retain=false 2024-11-19T18:29:42,947 INFO [30db5f576be8:46573 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-19T18:29:42,948 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c2b57c628ca9f719f9c427e72d9c8d20, regionState=OPENING, regionLocation=30db5f576be8,34263,1732040981107 2024-11-19T18:29:42,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c2b57c628ca9f719f9c427e72d9c8d20, ASSIGN because future has completed 2024-11-19T18:29:42,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2b57c628ca9f719f9c427e72d9c8d20, server=30db5f576be8,34263,1732040981107}] 2024-11-19T18:29:43,112 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:43,112 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c2b57c628ca9f719f9c427e72d9c8d20, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:29:43,113 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,113 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:29:43,114 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,114 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,116 INFO [StoreOpener-c2b57c628ca9f719f9c427e72d9c8d20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,118 INFO [StoreOpener-c2b57c628ca9f719f9c427e72d9c8d20-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2b57c628ca9f719f9c427e72d9c8d20 columnFamilyName info 2024-11-19T18:29:43,118 DEBUG [StoreOpener-c2b57c628ca9f719f9c427e72d9c8d20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:29:43,119 INFO [StoreOpener-c2b57c628ca9f719f9c427e72d9c8d20-1 {}] regionserver.HStore(327): Store=c2b57c628ca9f719f9c427e72d9c8d20/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:29:43,119 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,120 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,120 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,121 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,121 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,123 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,126 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:29:43,127 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c2b57c628ca9f719f9c427e72d9c8d20; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884232, jitterRate=0.12435929477214813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:29:43,127 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:43,128 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c2b57c628ca9f719f9c427e72d9c8d20: Running coprocessor pre-open hook at 1732040983114Writing region info on filesystem at 1732040983114Initializing all the Stores at 1732040983115 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732040983115Cleaning up temporary data from old regions at 1732040983121 (+6 ms)Running coprocessor post-open hooks at 1732040983127 (+6 ms)Region opened successfully at 1732040983127 2024-11-19T18:29:43,132 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20., pid=6, masterSystemTime=1732040983106 2024-11-19T18:29:43,135 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:43,135 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:43,136 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c2b57c628ca9f719f9c427e72d9c8d20, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,34263,1732040981107 2024-11-19T18:29:43,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2b57c628ca9f719f9c427e72d9c8d20, server=30db5f576be8,34263,1732040981107 because future has completed 2024-11-19T18:29:43,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T18:29:43,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c2b57c628ca9f719f9c427e72d9c8d20, server=30db5f576be8,34263,1732040981107 in 194 msec 2024-11-19T18:29:43,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T18:29:43,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c2b57c628ca9f719f9c427e72d9c8d20, ASSIGN in 357 msec 2024-11-19T18:29:43,154 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T18:29:43,154 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732040983154"}]},"ts":"1732040983154"} 2024-11-19T18:29:43,157 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-19T18:29:43,159 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T18:29:43,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 521 msec 2024-11-19T18:29:47,365 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:29:47,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:47,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:47,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:47,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:29:47,522 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-19T18:29:51,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:29:51,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T18:29:51,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T18:29:51,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-19T18:29:51,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:29:51,862 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T18:29:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:29:52,742 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-19T18:29:52,742 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-19T18:29:52,745 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T18:29:52,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:29:52,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:52,761 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:52,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:52,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:52,762 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:29:52,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f9b72e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:52,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7feb24a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:52,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1545e1f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-43613-hadoop-hdfs-3_4_1-tests_jar-_-any-5648230612966117394/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:52,878 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@219c70cc{HTTP/1.1, (http/1.1)}{localhost:43613} 2024-11-19T18:29:52,878 INFO [Time-limited test {}] server.Server(415): Started @116261ms 2024-11-19T18:29:52,880 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:29:52,918 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:52,921 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:52,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:52,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:52,922 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:29:52,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15ec6c86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:52,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37886c8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:52,998 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:52,998 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:53,022 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:29:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2d859efb91caed5 with lease ID 0x2f94b0dd432c328d: Processing first storage report for DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9 from datanode DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2d859efb91caed5 with lease ID 0x2f94b0dd432c328d: from storage DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9 node DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2d859efb91caed5 with lease ID 0x2f94b0dd432c328d: Processing first storage report for DS-9d70bd03-fc4f-43b4-85e2-dd584299780c from datanode DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2d859efb91caed5 with lease ID 0x2f94b0dd432c328d: from storage DS-9d70bd03-fc4f-43b4-85e2-dd584299780c node DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@312d6b73{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-42829-hadoop-hdfs-3_4_1-tests_jar-_-any-11666240205241817159/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:53,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4fb99827{HTTP/1.1, (http/1.1)}{localhost:42829} 2024-11-19T18:29:53,052 INFO [Time-limited test {}] server.Server(415): Started @116434ms 2024-11-19T18:29:53,053 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:29:53,097 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:29:53,101 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:29:53,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:29:53,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:29:53,103 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:29:53,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:29:53,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:29:53,174 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data7/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:53,174 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data8/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:53,196 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:29:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19ca91b022282794 with lease ID 0x2f94b0dd432c328e: Processing first storage report for DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa from datanode DatanodeRegistration(127.0.0.1:33555, datanodeUuid=81938edd-1f1f-485e-a826-7e374c66012b, infoPort=44357, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19ca91b022282794 with lease ID 0x2f94b0dd432c328e: from storage DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa node DatanodeRegistration(127.0.0.1:33555, datanodeUuid=81938edd-1f1f-485e-a826-7e374c66012b, infoPort=44357, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19ca91b022282794 with lease ID 0x2f94b0dd432c328e: Processing first storage report for DS-c8625a7b-3eae-47f6-b127-098930fb77bf from datanode DatanodeRegistration(127.0.0.1:33555, datanodeUuid=81938edd-1f1f-485e-a826-7e374c66012b, infoPort=44357, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19ca91b022282794 with lease ID 0x2f94b0dd432c328e: from storage DS-c8625a7b-3eae-47f6-b127-098930fb77bf node DatanodeRegistration(127.0.0.1:33555, datanodeUuid=81938edd-1f1f-485e-a826-7e374c66012b, infoPort=44357, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@304af6f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-36169-hadoop-hdfs-3_4_1-tests_jar-_-any-2717848578105498863/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:53,236 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:36169} 2024-11-19T18:29:53,236 INFO [Time-limited test {}] server.Server(415): Started @116618ms 2024-11-19T18:29:53,237 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T18:29:53,347 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data9/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:53,347 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data10/current/BP-1554420387-172.17.0.2-1732040980060/current, will proceed with Du for space computation calculation, 2024-11-19T18:29:53,370 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:29:53,373 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7282706672e1b08a with lease ID 0x2f94b0dd432c328f: Processing first storage report for DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca from datanode DatanodeRegistration(127.0.0.1:41507, datanodeUuid=46c886d7-1079-44a8-87c0-574247e98d24, infoPort=45089, infoSecurePort=0, ipcPort=39215, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,373 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7282706672e1b08a with lease ID 0x2f94b0dd432c328f: from storage DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca node DatanodeRegistration(127.0.0.1:41507, datanodeUuid=46c886d7-1079-44a8-87c0-574247e98d24, infoPort=45089, infoSecurePort=0, ipcPort=39215, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,373 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7282706672e1b08a with lease ID 0x2f94b0dd432c328f: Processing first storage report for DS-d546c4a2-058d-4504-839f-f75d00acc522 from datanode DatanodeRegistration(127.0.0.1:41507, datanodeUuid=46c886d7-1079-44a8-87c0-574247e98d24, infoPort=45089, infoSecurePort=0, ipcPort=39215, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060) 2024-11-19T18:29:53,373 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7282706672e1b08a with lease ID 0x2f94b0dd432c328f: from storage DS-d546c4a2-058d-4504-839f-f75d00acc522 node DatanodeRegistration(127.0.0.1:41507, datanodeUuid=46c886d7-1079-44a8-87c0-574247e98d24, infoPort=45089, infoSecurePort=0, ipcPort=39215, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:29:53,458 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,458 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,458 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,458 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,459 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta block BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:29:53,459 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 
2024-11-19T18:29:53,459 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:29:53,459 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1714327555_22 at /127.0.0.1:51614 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51614 dst: /127.0.0.1:35317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1714327555_22 at /127.0.0.1:51358 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44689:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51358 dst: /127.0.0.1:44689 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:51304 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44689:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51304 dst: /127.0.0.1:44689 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:51540 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51540 dst: /127.0.0.1:35317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:51588 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51588 dst: /127.0.0.1:35317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:51574 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51574 dst: /127.0.0.1:35317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:51332 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44689:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51332 dst: /127.0.0.1:44689 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:51324 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44689:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51324 dst: /127.0.0.1:44689 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5450787c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:53,463 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@198c3788{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:53,463 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:53,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2295376c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:53,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c461833{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:53,465 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:53,465 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid 9cea56a8-0f68-4292-b072-5e8ede8ccc4a) service to localhost/127.0.0.1:44171 2024-11-19T18:29:53,465 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:53,465 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:53,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data3/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:53,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data4/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:53,466 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:53,466 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6f8e159a {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing unknown operation src: /127.0.0.1:58006 dst: /127.0.0.1:35317 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,467 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,467 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta block BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,467 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:58010 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58010 dst: /127.0.0.1:35317 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:58008 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58008 dst: /127.0.0.1:35317 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,467 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@f20717e {}] datanode.DataXceiver(331): 127.0.0.1:35317:DataXceiver error processing unknown operation src: /127.0.0.1:58028 dst: /127.0.0.1:35317 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:53,468 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@307e6985{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:53,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c3d2a60{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:53,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:53,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a6958e2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:53,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a514f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:53,471 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:53,471 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid 6b0e1fe9-d13d-4b20-928d-4570c7bfb307) service to localhost/127.0.0.1:44171 2024-11-19T18:29:53,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:53,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:53,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data1/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:53,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data2/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:53,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:53,475 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20., hostname=30db5f576be8,34263,1732040981107, seqNum=2] 2024-11-19T18:29:53,477 ERROR [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39-prefix:30db5f576be8,34263,1732040981107 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,477 WARN [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39-prefix:30db5f576be8,34263,1732040981107 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:29:53,477 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,477 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C34263%2C1732040981107:(num 1732040981710) roll requested 2024-11-19T18:29:53,478 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732040993477 2024-11-19T18:29:53,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:53,484 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:53,484 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:53,484 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:53,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:53,484 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 2024-11-19T18:29:53,484 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,485 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:53,485 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44357:44357),(127.0.0.1/127.0.0.1:45089:45089)] 2024-11-19T18:29:53,485 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:29:53,486 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-19T18:29:53,486 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-19T18:29:53,486 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 2024-11-19T18:29:53,489 WARN [IPC Server handler 1 on default port 44171 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-19T18:29:53,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 after 4ms 2024-11-19T18:29:54,582 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:55,226 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:55,485 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:55,486 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 2024-11-19T18:29:55,487 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:55,487 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:29:55,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:32938 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32938 dst: /127.0.0.1:33555 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:55,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:37418 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41507:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37418 dst: /127.0.0.1:41507 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:29:55,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@312d6b73{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:55,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4fb99827{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:55,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:55,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37886c8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:55,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15ec6c86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:55,491 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:55,491 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T18:29:55,491 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid 81938edd-1f1f-485e-a826-7e374c66012b) service to localhost/127.0.0.1:44171 2024-11-19T18:29:55,491 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:55,491 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data7/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:55,491 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data8/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:55,492 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:56,582 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:57,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:57,486 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]] 2024-11-19T18:29:57,486 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:57,486 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C34263%2C1732040981107:(num 1732040993477) roll requested 2024-11-19T18:29:57,486 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732040997486 2024-11-19T18:29:57,490 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:57,490 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:29:57,490 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741839_1021 2024-11-19T18:29:57,493 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:29:57,493 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 after 4007ms 2024-11-19T18:29:57,496 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T18:29:57,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:57,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:57,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:57,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:57,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:29:57,500 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040997486 2024-11-19T18:29:57,500 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45089:45089),(127.0.0.1/127.0.0.1:37267:37267)] 2024-11-19T18:29:57,500 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:29:57,500 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 is not closed yet, will try archiving it next time 2024-11-19T18:29:57,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41507 is added to blk_1073741838_1020 (size=3600) 2024-11-19T18:29:57,902 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not 
closed yet, will try archiving it next time 2024-11-19T18:29:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741838_1020 (size=3600) 2024-11-19T18:29:58,582 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,500 WARN [ResponseProcessor for block BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,501 WARN [DataStreamer for file /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040997486 block BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:29:59,501 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,501 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:37444 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41507:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37444 dst: /127.0.0.1:41507 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:29:59,501 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43972 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43972 dst: /127.0.0.1:33901 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:59,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@304af6f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:29:59,503 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:29:59,503 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:29:59,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:29:59,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:29:59,505 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:29:59,505 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:29:59,505 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid 46c886d7-1079-44a8-87c0-574247e98d24) service to localhost/127.0.0.1:44171 2024-11-19T18:29:59,505 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:29:59,506 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data9/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:59,506 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data10/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:29:59,506 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:29:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:29:59,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:29:59,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/b592d6259fea45f290b0a4320c6bd9e0 is 1080, key is row0002/info:/1732040995493/Put/seqid=0 2024-11-19T18:29:59,537 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:29:59,538 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:29:59,538 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741841_1024 2024-11-19T18:29:59,538 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:29:59,540 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43988 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025 to mirror 127.0.0.1:35317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:29:59,541 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:29:59,541 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025 2024-11-19T18:29:59,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43988 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:29:59,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43988 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43988 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:59,541 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:29:59,543 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33555 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:29:59,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43990 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026 to mirror 127.0.0.1:33555 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:59,543 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:29:59,543 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026 2024-11-19T18:29:59,543 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43990 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:29:59,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:43990 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43990 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:29:59,544 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:29:59,545 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:29:59,545 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 
2024-11-19T18:29:59,545 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741844_1027 2024-11-19T18:29:59,545 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:29:59,546 WARN [IPC Server handler 0 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:29:59,546 WARN [IPC Server handler 0 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:29:59,546 WARN [IPC Server handler 0 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:29:59,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741845_1028 (size=10347) 2024-11-19T18:29:59,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/b592d6259fea45f290b0a4320c6bd9e0 2024-11-19T18:29:59,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/b592d6259fea45f290b0a4320c6bd9e0 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0 2024-11-19T18:29:59,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0, entries=5, sequenceid=11, filesize=10.1 K 2024-11-19T18:29:59,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for c2b57c628ca9f719f9c427e72d9c8d20 in 448ms, sequenceid=11, compaction requested=false 2024-11-19T18:29:59,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:00,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-19T18:30:00,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/ae540b4e208243cd892f45cb54e5a298 is 1080, key is row0007/info:/1732040999517/Put/seqid=0 2024-11-19T18:30:00,145 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41507 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:00,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029 to mirror 127.0.0.1:41507 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:00,145 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:00,145 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:30:00,145 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029 2024-11-19T18:30:00,145 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44014 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:00,146 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:00,147 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:00,147 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:00,147 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741847_1030 2024-11-19T18:30:00,148 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:00,150 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:00,150 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:00,150 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741848_1031 2024-11-19T18:30:00,150 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:00,152 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44689 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:00,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44026 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032 to mirror 127.0.0.1:44689 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:00,153 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:00,153 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032 2024-11-19T18:30:00,153 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44026 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:30:00,153 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:44026 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44026 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:00,153 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:00,154 WARN [IPC Server handler 2 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:00,154 WARN [IPC Server handler 2 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:00,154 WARN [IPC Server handler 2 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:00,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741850_1033 (size=12506) 2024-11-19T18:30:00,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/ae540b4e208243cd892f45cb54e5a298 2024-11-19T18:30:00,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/ae540b4e208243cd892f45cb54e5a298 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298 2024-11-19T18:30:00,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298, entries=7, sequenceid=24, filesize=12.2 K 2024-11-19T18:30:00,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for c2b57c628ca9f719f9c427e72d9c8d20 in 435ms, sequenceid=24, compaction requested=false 2024-11-19T18:30:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-19T18:30:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298 because midkey is the same as first or last row 2024-11-19T18:30:00,583 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,501 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]] 2024-11-19T18:30:01,501 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,501 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C34263%2C1732040981107:(num 1732040997486) roll requested 2024-11-19T18:30:01,502 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732041001502 2024-11-19T18:30:01,508 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,509 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:01,509 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741851_1034 2024-11-19T18:30:01,509 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:01,511 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,512 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:01,512 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741852_1035 2024-11-19T18:30:01,513 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:01,515 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,515 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:01,515 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741853_1036 2024-11-19T18:30:01,516 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:01,517 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,517 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:01,518 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741854_1037 2024-11-19T18:30:01,519 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:01,520 WARN [IPC Server handler 4 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:01,520 WARN [IPC Server handler 4 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:01,520 WARN [IPC Server handler 4 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:01,526 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:01,526 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:01,526 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:01,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:01,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:01,528 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040997486 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041001502 2024-11-19T18:30:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741840_1023 
(size=24823) 2024-11-19T18:30:01,531 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:30:01,541 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040993477 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C34263%2C1732040981107.1732040993477 2024-11-19T18:30:01,542 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37267:37267)] 2024-11-19T18:30:01,542 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:30:01,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:01,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T18:30:01,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/434dcbeb64b44ee2b9bf6823fc264acf is 1079, key is tmprow/info:/1732041001555/Put/seqid=0 2024-11-19T18:30:01,565 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,566 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 
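The roll above is driven by the earlier FSHLog warning "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL": the writer's pipeline shrank below the tolerated replica count, so the log roller closes the current WAL and opens 30db5f576be8%2C34263%2C1732040981107.1732041001502 on a fresh pipeline. The decision itself is just a threshold check, sketched below with the threshold hard-coded; the real FSHLog reads it from configuration, so treat names and values as assumptions.

```java
public class LowReplicationRollSketch {
    // Minimum acceptable number of replicas in the WAL's output pipeline.
    // In this run the expectation is 2, while only 1 datanode survived.
    static final int MIN_TOLERABLE_REPLICATION = 2;

    static boolean shouldRequestRoll(int currentPipelineSize) {
        if (currentPipelineSize < MIN_TOLERABLE_REPLICATION) {
            System.out.printf(
                "HDFS pipeline error detected. Found %d replicas but expecting "
                + "no less than %d replicas. Requesting close of WAL.%n",
                currentPipelineSize, MIN_TOLERABLE_REPLICATION);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        // The surviving pipeline held a single datanode, as in the log.
        System.out.println("roll requested: " + shouldRequestRoll(1));
    }
}
```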
2024-11-19T18:30:01,566 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741856_1039 2024-11-19T18:30:01,566 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:01,568 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,568 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:01,568 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741857_1040 2024-11-19T18:30:01,568 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:01,570 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:01,570 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:01,570 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741858_1041 2024-11-19T18:30:01,571 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:01,572 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,572 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 
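The recurring NameNode-side warnings in this run ("Failed to place enough replicas, still in need of 1 to reach 2 ... unavailableStorages=[DISK]") are the other half of the same failure: with most datanodes unreachable or excluded, the placement policy cannot find a second DISK-type target. The sketch below is a deliberately simplified stand-in for that target selection, not BlockPlacementPolicyDefault itself; the Node record (Java 16+) and addresses are illustrative, with only 127.0.0.1:33901 still reachable as in the log.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class ReplicaPlacementSketch {
    // Simplified datanode descriptor with a single storage type.
    record Node(String addr, String storageType, boolean alive) {}

    // Pick up to 'required' targets of the wanted storage type, skipping dead
    // or excluded nodes, and report the shortfall the NameNode logs as
    // "still in need of N to reach R".
    static List<Node> choose(List<Node> cluster, Set<String> excluded,
                             int required, String wantedType) {
        List<Node> chosen = new ArrayList<>();
        for (Node n : cluster) {
            if (chosen.size() == required) break;
            if (!n.alive() || excluded.contains(n.addr())) continue;
            if (!n.storageType().equals(wantedType)) continue;
            chosen.add(n);
        }
        int missing = required - chosen.size();
        if (missing > 0) {
            System.out.println("Failed to place enough replicas, still in need of "
                + missing + " to reach " + required);
        }
        return chosen;
    }

    public static void main(String[] args) {
        List<Node> cluster = List.of(
            new Node("127.0.0.1:33901", "DISK", true),
            new Node("127.0.0.1:41507", "DISK", false),
            new Node("127.0.0.1:33555", "DISK", false),
            new Node("127.0.0.1:35317", "DISK", false));
        choose(cluster, Set.of(), 2, "DISK"); // prints a shortfall of 1
    }
}
```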
2024-11-19T18:30:01,572 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741859_1042 2024-11-19T18:30:01,572 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:01,573 WARN [IPC Server handler 1 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:01,573 WARN [IPC Server handler 1 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:01,573 WARN [IPC Server handler 1 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:01,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741860_1043 (size=6027) 2024-11-19T18:30:01,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/434dcbeb64b44ee2b9bf6823fc264acf 2024-11-19T18:30:01,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/434dcbeb64b44ee2b9bf6823fc264acf as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf 2024-11-19T18:30:01,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf, entries=1, sequenceid=34, filesize=5.9 K 2024-11-19T18:30:01,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c2b57c628ca9f719f9c427e72d9c8d20 in 51ms, sequenceid=34, compaction requested=true 2024-11-19T18:30:01,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:01,608 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-19T18:30:01,608 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:01,608 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298 because midkey is the same as first or last row 2024-11-19T18:30:01,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2b57c628ca9f719f9c427e72d9c8d20:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:30:01,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:30:01,620 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:30:01,623 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:30:01,623 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1541): c2b57c628ca9f719f9c427e72d9c8d20/info is initiating minor compaction (all files) 2024-11-19T18:30:01,623 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c2b57c628ca9f719f9c427e72d9c8d20/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 
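After each flush and compaction the region server re-checks whether the region should split: the store size (sumSize=28.2 K) is over the check threshold (sizeToCheck=16.0 K), yet the split is refused because the candidate midkey equals the first or last row of the store file. Both checks are sketched below under the simplifying assumption of a single store file described by its size and first/mid/last row keys; this is an illustration, not the HBase split-policy code.

```java
public class SplitCheckSketch {
    // Size check: total store size must exceed the configured threshold.
    static boolean sizeBigEnough(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    // A usable split point must differ from both the first and last row,
    // otherwise one daughter region would be empty -- the reason for
    // "cannot split ... because midkey is the same as first or last row".
    static boolean midkeyUsable(String firstRow, String midKey, String lastRow) {
        return !midKey.equals(firstRow) && !midKey.equals(lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 28 * 1024 + 200;   // ~28.2 K, as logged
        long sizeToCheck = 16 * 1024;     // 16.0 K
        System.out.println("big enough: " + sizeBigEnough(sumSize, sizeToCheck));
        // With only a handful of distinct rows written (row0002 ... tmprow),
        // the midkey can collapse onto an end key and the split is skipped.
        System.out.println("midkey usable: "
            + midkeyUsable("row0002", "row0002", "tmprow"));
    }
}
```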
2024-11-19T18:30:01,623 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf] into tmpdir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp, totalSize=28.2 K 2024-11-19T18:30:01,624 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting b592d6259fea45f290b0a4320c6bd9e0, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732040995493 2024-11-19T18:30:01,624 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae540b4e208243cd892f45cb54e5a298, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732040999517 2024-11-19T18:30:01,625 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting 434dcbeb64b44ee2b9bf6823fc264acf, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732041001555 2024-11-19T18:30:01,663 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2b57c628ca9f719f9c427e72d9c8d20#info#compaction#21 average throughput is 1.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:30:01,663 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/1fee078fcdba4d0b9fd6b1c18877a2bd is 1080, key is row0002/info:/1732040995493/Put/seqid=0 2024-11-19T18:30:01,666 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
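The minor compaction that follows rewrites three sorted store files (10.1 K + 12.2 K + 5.9 K, 28.2 K in total) into a single new file. At its core that is a k-way merge of sorted key streams, sketched below with in-memory lists standing in for HFiles; the real compactor streams cells and handles versions, deletes, and bloom filters, so this is only the shape of the operation (Java 16+ for the local record).

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class MinorCompactionSketch {
    // Merge several individually sorted "store files" (lists of row keys)
    // into one sorted output, the way a minor compaction produces one file.
    static List<String> merge(List<List<String>> storeFiles) {
        record Cursor(List<String> file, int pos) {}
        PriorityQueue<Cursor> heap = new PriorityQueue<>(
            Comparator.comparing((Cursor c) -> c.file().get(c.pos())));
        for (List<String> f : storeFiles) {
            if (!f.isEmpty()) heap.add(new Cursor(f, 0));
        }
        List<String> out = new ArrayList<>();
        while (!heap.isEmpty()) {
            Cursor c = heap.poll();
            out.add(c.file().get(c.pos()));        // emit smallest remaining key
            if (c.pos() + 1 < c.file().size()) {
                heap.add(new Cursor(c.file(), c.pos() + 1));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // Row keys chosen to echo the log (row0002 ... tmprow); contents illustrative.
        System.out.println(merge(List.of(
            List.of("row0002", "row0005"),
            List.of("row0009", "row0012"),
            List.of("tmprow"))));
    }
}
```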
2024-11-19T18:30:01,666 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:01,666 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741861_1044 2024-11-19T18:30:01,667 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:01,669 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,670 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:01,670 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741862_1045 2024-11-19T18:30:01,670 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:01,679 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44689 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:01,680 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:01,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41700 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046 to mirror 127.0.0.1:44689 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:01,680 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046 2024-11-19T18:30:01,680 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41700 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:30:01,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41700 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41700 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:01,680 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:01,682 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:01,683 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 
2024-11-19T18:30:01,683 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741864_1047 2024-11-19T18:30:01,683 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:01,684 WARN [IPC Server handler 1 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:01,684 WARN [IPC Server handler 1 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:01,684 WARN [IPC Server handler 1 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:01,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741865_1048 (size=17994) 2024-11-19T18:30:01,708 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/1fee078fcdba4d0b9fd6b1c18877a2bd as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd 2024-11-19T18:30:01,725 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c2b57c628ca9f719f9c427e72d9c8d20/info of c2b57c628ca9f719f9c427e72d9c8d20 into 1fee078fcdba4d0b9fd6b1c18877a2bd(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:01,726 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20., storeName=c2b57c628ca9f719f9c427e72d9c8d20/info, priority=13, startTime=1732041001608; duration=0sec 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd because midkey is the same as first or last row 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd because midkey is the same as first or last row 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd because midkey is the same as first or last row 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:30:01,726 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2b57c628ca9f719f9c427e72d9c8d20:info 2024-11-19T18:30:02,030 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer 
BP-1554420387-172.17.0.2-1732040980060:blk_1073741845_1028 to 127.0.0.1:41507 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:02,030 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741850_1033 to 127.0.0.1:33555 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:02,583 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
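The DataNode$DataTransfer failures above are the surviving datanode (127.0.0.1:33901) trying to re-replicate its now under-replicated blocks (blk_1073741845_1028, blk_1073741850_1033) to peers that are themselves down. The planning side of that is simple to picture: compare live replica counts against the target replication and pick an existing holder as the transfer source. The sketch below illustrates only that comparison and is not the NameNode's replication monitor; block IDs and the target of 2 are taken from the log.

```java
import java.util.Map;
import java.util.Set;

public class ReReplicationSketch {
    // For each block, compare live replica count to the target replication and,
    // if short, name an existing holder as the transfer source -- the transfers
    // that fail above with "Failed to transfer ... Connection refused".
    static void planTransfers(Map<String, Set<String>> liveReplicas, int target) {
        for (Map.Entry<String, Set<String>> e : liveReplicas.entrySet()) {
            int missing = target - e.getValue().size();
            if (missing > 0 && !e.getValue().isEmpty()) {
                String source = e.getValue().iterator().next();
                System.out.println(e.getKey() + ": " + missing
                    + " replica(s) missing, copy from " + source);
            }
        }
    }

    public static void main(String[] args) {
        planTransfers(Map.of(
            "blk_1073741845_1028", Set.of("127.0.0.1:33901"),
            "blk_1073741850_1033", Set.of("127.0.0.1:33901")), 2);
    }
}
```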
2024-11-19T18:30:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:02,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T18:30:02,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/c287b8cd4ca64436806b810fdb0374cb is 1079, key is tmprow/info:/1732041002979/Put/seqid=0 2024-11-19T18:30:02,988 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:02,988 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:02,988 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741866_1049 2024-11-19T18:30:02,989 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:02,990 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:02,990 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:02,990 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741867_1050 2024-11-19T18:30:02,991 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:02,992 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:02,992 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:02,992 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741868_1051 2024-11-19T18:30:02,992 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:02,993 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:02,994 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:02,994 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741869_1052 2024-11-19T18:30:02,994 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:02,995 WARN [IPC Server handler 3 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:02,995 WARN [IPC Server handler 3 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:02,995 WARN [IPC Server handler 3 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:02,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741870_1053 (size=6027) 2024-11-19T18:30:03,228 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:03,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/c287b8cd4ca64436806b810fdb0374cb 2024-11-19T18:30:03,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/c287b8cd4ca64436806b810fdb0374cb as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb 2024-11-19T18:30:03,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb, entries=1, sequenceid=45, filesize=5.9 K 2024-11-19T18:30:03,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c2b57c628ca9f719f9c427e72d9c8d20 in 433ms, sequenceid=45, compaction requested=false 2024-11-19T18:30:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-19T18:30:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:03,414 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd because midkey is the same as first or last row 2024-11-19T18:30:03,542 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]] 2024-11-19T18:30:03,542 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:03,542 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C34263%2C1732040981107:(num 1732041001502) roll requested 2024-11-19T18:30:03,543 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732041003543 2024-11-19T18:30:03,546 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:03,546 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:03,546 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741871_1054 2024-11-19T18:30:03,547 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:03,548 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:03,548 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:03,548 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741872_1055 2024-11-19T18:30:03,549 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:03,550 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:03,550 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:03,550 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741873_1056 2024-11-19T18:30:03,550 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:03,551 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:03,552 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:03,552 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741874_1057 2024-11-19T18:30:03,552 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:03,553 WARN [IPC Server handler 3 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:03,553 WARN [IPC Server handler 3 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:03,553 WARN [IPC Server handler 3 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:03,555 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:03,555 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:03,556 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:03,556 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:03,556 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:03,556 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041001502 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041003543 2024-11-19T18:30:03,557 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37267:37267)] 2024-11-19T18:30:03,557 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:30:03,557 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041001502 is not closed yet, will try archiving it next time 2024-11-19T18:30:03,557 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040997486 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C34263%2C1732040981107.1732040997486 2024-11-19T18:30:03,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741855_1038 (size=13591) 2024-11-19T18:30:03,959 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 is not closed yet, will try archiving it next time 2024-11-19T18:30:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:04,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T18:30:04,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/fbcdfeddaef24696ac61b6006a98124f is 1079, key is tmprow/info:/1732041004399/Put/seqid=0 2024-11-19T18:30:04,407 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:04,407 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:04,407 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741876_1059 2024-11-19T18:30:04,407 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:04,409 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,409 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:04,409 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741877_1060 2024-11-19T18:30:04,409 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:04,410 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,410 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:04,411 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741878_1061 2024-11-19T18:30:04,411 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:04,413 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33555 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41718 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062 to mirror 127.0.0.1:33555 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:04,414 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:04,414 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062 2024-11-19T18:30:04,414 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41718 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:30:04,414 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41718 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41718 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:04,414 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:04,415 WARN [IPC Server handler 4 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:04,415 WARN [IPC Server handler 4 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:04,415 WARN [IPC Server handler 4 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:04,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741880_1063 (size=6027) 2024-11-19T18:30:04,583 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:04,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/fbcdfeddaef24696ac61b6006a98124f 2024-11-19T18:30:04,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/fbcdfeddaef24696ac61b6006a98124f as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f 2024-11-19T18:30:04,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f, entries=1, sequenceid=55, filesize=5.9 K 2024-11-19T18:30:04,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c2b57c628ca9f719f9c427e72d9c8d20 in 432ms, sequenceid=55, compaction requested=true 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd because midkey is the same as first or last row 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2b57c628ca9f719f9c427e72d9c8d20:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:30:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:30:04,833 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:30:04,834 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:30:04,834 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1541): c2b57c628ca9f719f9c427e72d9c8d20/info is initiating minor compaction (all files) 2024-11-19T18:30:04,834 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
c2b57c628ca9f719f9c427e72d9c8d20/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:30:04,834 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f] into tmpdir=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp, totalSize=29.3 K 2024-11-19T18:30:04,835 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1fee078fcdba4d0b9fd6b1c18877a2bd, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732040995493 2024-11-19T18:30:04,835 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting c287b8cd4ca64436806b810fdb0374cb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732041002979 2024-11-19T18:30:04,835 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbcdfeddaef24696ac61b6006a98124f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732041004399 2024-11-19T18:30:04,850 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2b57c628ca9f719f9c427e72d9c8d20#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:30:04,851 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/4a4a2174774e4a689104aa34198035dd is 1080, key is row0002/info:/1732040995493/Put/seqid=0 2024-11-19T18:30:04,853 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,853 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]) is bad. 2024-11-19T18:30:04,853 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741881_1064 2024-11-19T18:30:04,854 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44689,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK] 2024-11-19T18:30:04,855 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,855 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:04,855 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741882_1065 2024-11-19T18:30:04,856 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:04,858 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:04,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41734 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6]'}, localName='127.0.0.1:33901', datanodeUuid='a8a8c18e-135e-4b5d-ad0c-15901bab7072', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066 to mirror 127.0.0.1:35317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:04,858 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK], DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:04,858 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066 2024-11-19T18:30:04,858 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41734 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T18:30:04,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1079475717_22 at /127.0.0.1:41734 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:33901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41734 dst: /127.0.0.1:33901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:04,858 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:04,859 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:04,860 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 
2024-11-19T18:30:04,860 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741884_1067 2024-11-19T18:30:04,860 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:04,861 WARN [IPC Server handler 0 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T18:30:04,861 WARN [IPC Server handler 0 on default port 44171 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T18:30:04,861 WARN [IPC Server handler 0 on default port 44171 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T18:30:04,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741885_1068 (size=18097) 2024-11-19T18:30:05,027 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741860_1043 to 127.0.0.1:41507 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:05,027 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741840_1023 to 127.0.0.1:33555 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:05,228 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:05,271 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/4a4a2174774e4a689104aa34198035dd as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/4a4a2174774e4a689104aa34198035dd
2024-11-19T18:30:05,278 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c2b57c628ca9f719f9c427e72d9c8d20/info of c2b57c628ca9f719f9c427e72d9c8d20 into 4a4a2174774e4a689104aa34198035dd(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c2b57c628ca9f719f9c427e72d9c8d20:
2024-11-19T18:30:05,278 INFO [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20., storeName=c2b57c628ca9f719f9c427e72d9c8d20/info, priority=13, startTime=1732041004833; duration=0sec
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/4a4a2174774e4a689104aa34198035dd because midkey is the same as first or last row
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/4a4a2174774e4a689104aa34198035dd because midkey is the same as first or last row
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T18:30:05,278 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/4a4a2174774e4a689104aa34198035dd because midkey is the same as first or last row
2024-11-19T18:30:05,279 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:30:05,279 DEBUG [RS:0;30db5f576be8:34263-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2b57c628ca9f719f9c427e72d9c8d20:info
2024-11-19T18:30:05,557 WARN [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-11-19T18:30:05,557 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:05,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T18:30:05,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T18:30:05,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T18:30:05,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T18:30:05,627 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-19T18:30:05,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aa56449{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,AVAILABLE}
2024-11-19T18:30:05,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@633469fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T18:30:05,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330b5e2a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/java.io.tmpdir/jetty-localhost-46671-hadoop-hdfs-3_4_1-tests_jar-_-any-9381637342681679150/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T18:30:05,746 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13c2f5a4{HTTP/1.1, (http/1.1)}{localhost:46671}
2024-11-19T18:30:05,746 INFO [Time-limited test {}] server.Server(415): Started @129128ms
2024-11-19T18:30:05,747 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-19T18:30:05,852 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-19T18:30:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41d171811ed84949 with lease ID 0x2f94b0dd432c3290: from storage DS-bb6bd24d-f684-4186-ac36-152d27a55a13 node DatanodeRegistration(127.0.0.1:33519, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=41525, infoSecurePort=0, ipcPort=35767, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T18:30:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41d171811ed84949 with lease ID 0x2f94b0dd432c3290: from storage DS-6ca4ee45-d0a7-4be1-b3c7-3c8bf3fe00c8 node DatanodeRegistration(127.0.0.1:33519, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=41525, infoSecurePort=0, ipcPort=35767, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T18:30:06,028 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741870_1053 to 127.0.0.1:33555 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:06,028 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741865_1048 to 127.0.0.1:33555 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:06,584 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:07,229 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:07,558 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:08,028 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741855_1038 to 127.0.0.1:41507 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:08,028 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741880_1063 to 127.0.0.1:41507 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:08,584 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:09,028 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33901, datanodeUuid=a8a8c18e-135e-4b5d-ad0c-15901bab7072, infoPort=37267, infoSecurePort=0, ipcPort=35907, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741885_1068 to 127.0.0.1:33555 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:09,229 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:09,558 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:10,585 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,006 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-19T18:30:11,229 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,402 ERROR [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData-prefix:30db5f576be8,46573,1732040981038 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,403 WARN [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData-prefix:30db5f576be8,46573,1732040981038 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,403 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C46573%2C1732040981038:(num 1732040981264) roll requested
2024-11-19T18:30:11,403 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C46573%2C1732040981038.1732041011403
2024-11-19T18:30:11,408 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41507
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46000 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data4]'}, localName='127.0.0.1:33519', datanodeUuid='9cea56a8-0f68-4292-b072-5e8ede8ccc4a', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069 to mirror 127.0.0.1:41507 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:11,408 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33519,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad.
2024-11-19T18:30:11,408 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46000 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-19T18:30:11,408 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069
2024-11-19T18:30:11,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46000 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:33519:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46000 dst: /127.0.0.1:33519 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:11,410 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]
2024-11-19T18:30:11,412 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33555
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data4]'}, localName='127.0.0.1:33519', datanodeUuid='9cea56a8-0f68-4292-b072-5e8ede8ccc4a', xmitsInProgress=0}:Exception transferring block BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070 to mirror 127.0.0.1:33555 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:11,412 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33519,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad.
2024-11-19T18:30:11,412 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070
2024-11-19T18:30:11,412 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-19T18:30:11,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-969062979_22 at /127.0.0.1:46014 [Receiving block BP-1554420387-172.17.0.2-1732040980060:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:33519:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46014 dst: /127.0.0.1:33519 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:11,413 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]
2024-11-19T18:30:11,424 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:11,424 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:11,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:11,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:11,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:11,424 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732041011403
2024-11-19T18:30:11,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,425 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:11,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264
2024-11-19T18:30:11,426 WARN [IPC Server handler 2 on default port 44171 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006
2024-11-19T18:30:11,426 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 after 1ms
2024-11-19T18:30:11,428 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41525:41525),(127.0.0.1/127.0.0.1:37267:37267)]
2024-11-19T18:30:11,428 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 is not closed yet, will try archiving it next time
2024-11-19T18:30:11,559 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:12,585 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:13,559 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:14,585 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:15,427 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 after 4002ms
2024-11-19T18:30:15,559 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:15,880 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d39b79b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35317,null,null]) java.net.ConnectException: Call From 30db5f576be8/172.17.0.2 to localhost:41019 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
    ... 12 more
2024-11-19T18:30:15,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741833_1019 (size=455)
2024-11-19T18:30:16,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732040981710 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C34263%2C1732040981107.1732040981710
2024-11-19T18:30:16,512 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041001502 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C34263%2C1732040981107.1732041001502
2024-11-19T18:30:16,586 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:16,857 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d1648d4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33519, datanodeUuid=9cea56a8-0f68-4292-b072-5e8ede8ccc4a, infoPort=41525, infoSecurePort=0, ipcPort=35767, storageInfo=lv=-57;cid=testClusterID;nsid=1790152791;c=1732040980060):Failed to transfer BP-1554420387-172.17.0.2-1732040980060:blk_1073741833_1019 to 127.0.0.1:35317 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T18:30:17,560 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:18,586 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:19,379 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.1732041019379
2024-11-19T18:30:19,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:19,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:19,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:19,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:19,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T18:30:19,386 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041003543 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041019379
2024-11-19T18:30:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741875_1058 (size=12911)
2024-11-19T18:30:19,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37267:37267),(127.0.0.1/127.0.0.1:41525:41525)]
2024-11-19T18:30:19,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041003543 is not closed yet, will try archiving it next time
2024-11-19T18:30:19,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34263 {}] regionserver.HRegion(8855): Flush requested on c2b57c628ca9f719f9c427e72d9c8d20
2024-11-19T18:30:19,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-19T18:30:19,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/e0faa117a6894fc995ec0127b958a820 is 1080, key is row0013/info:/1732041019389/Put/seqid=0
2024-11-19T18:30:19,400 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T18:30:19,400 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 2024-11-19T18:30:19,400 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741890_1074 2024-11-19T18:30:19,401 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:19,402 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,402 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]) is bad. 2024-11-19T18:30:19,402 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741891_1075 2024-11-19T18:30:19,403 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK] 2024-11-19T18:30:19,404 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,405 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33901,DS-2f2428e1-acf4-4ae5-82ce-dfb2b93cecb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:19,405 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741892_1076 2024-11-19T18:30:19,405 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741893_1077 (size=8190) 2024-11-19T18:30:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741893_1077 (size=8190) 2024-11-19T18:30:19,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/e0faa117a6894fc995ec0127b958a820 2024-11-19T18:30:19,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/e0faa117a6894fc995ec0127b958a820 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/e0faa117a6894fc995ec0127b958a820 2024-11-19T18:30:19,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/e0faa117a6894fc995ec0127b958a820, entries=3, sequenceid=66, filesize=8.0 K 2024-11-19T18:30:19,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for c2b57c628ca9f719f9c427e72d9c8d20 in 37ms, sequenceid=66, compaction requested=false 2024-11-19T18:30:19,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c2b57c628ca9f719f9c427e72d9c8d20: 2024-11-19T18:30:19,431 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-19T18:30:19,431 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:30:19,431 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/4a4a2174774e4a689104aa34198035dd because midkey is the same as first or last row 
2024-11-19T18:30:19,560 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-19T18:30:19,560 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:30:19,610 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:30:19,611 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:19,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:19,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:19,611 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T18:30:19,611 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:30:19,611 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1312823663, stopped=false 2024-11-19T18:30:19,611 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,46573,1732040981038 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:19,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:19,614 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:30:19,614 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T18:30:19,614 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:19,614 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:19,614 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:19,614 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:19,614 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,34263,1732040981107' ***** 2024-11-19T18:30:19,614 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:19,614 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:30:19,614 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,45935,1732040982421' ***** 2024-11-19T18:30:19,614 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:30:19,615 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,45935,1732040982421 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:30:19,615 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:30:19,615 INFO [RS:1;30db5f576be8:45935 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;30db5f576be8:45935. 2024-11-19T18:30:19,615 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:30:19,615 INFO [RS:0;30db5f576be8:34263 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-19T18:30:19,615 DEBUG [RS:1;30db5f576be8:45935 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:19,615 DEBUG [RS:1;30db5f576be8:45935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:19,615 INFO [RS:0;30db5f576be8:34263 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:30:19,616 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,45935,1732040982421; all regions closed. 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(3091): Received CLOSE for c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,34263,1732040981107 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:34263. 
2024-11-19T18:30:19,616 DEBUG [RS:0;30db5f576be8:34263 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:19,616 DEBUG [RS:0;30db5f576be8:34263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T18:30:19,616 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c2b57c628ca9f719f9c427e72d9c8d20, disabling compactions & flushes 2024-11-19T18:30:19,616 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:30:19,616 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:30:19,616 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:30:19,616 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. after waiting 0 ms 2024-11-19T18:30:19,616 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 
2024-11-19T18:30:19,617 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T18:30:19,617 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c2b57c628ca9f719f9c427e72d9c8d20=TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.} 2024-11-19T18:30:19,617 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c2b57c628ca9f719f9c427e72d9c8d20 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-19T18:30:19,617 DEBUG [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c2b57c628ca9f719f9c427e72d9c8d20 2024-11-19T18:30:19,617 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:30:19,617 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:30:19,617 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:30:19,617 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:30:19,617 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:30:19,617 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-19T18:30:19,617 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,617 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,618 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,617 ERROR [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39-prefix:30db5f576be8,34263,1732040981107.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,618 WARN [FSHLog-0-hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39-prefix:30db5f576be8,34263,1732040981107.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,618 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,618 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C34263%2C1732040981107.meta:.meta(num 1732040982139) roll requested 2024-11-19T18:30:19,618 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,618 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C34263%2C1732040981107.meta.1732041019618.meta 2024-11-19T18:30:19,619 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,619 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,619 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 2024-11-19T18:30:19,619 WARN [IPC Server handler 3 on default port 44171 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 has not been closed. Lease recovery is in progress. 
RecoveryId = 1078 for block blk_1073741837_1013 2024-11-19T18:30:19,620 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 after 1ms 2024-11-19T18:30:19,623 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/feb253fc6d9d4c6494c8eab369af07b4 is 1080, key is row0015/info:/1732041019394/Put/seqid=0 2024-11-19T18:30:19,624 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,624 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33519,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 
2024-11-19T18:30:19,624 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741895_1080 2024-11-19T18:30:19,625 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:19,631 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,631 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,631 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,631 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,631 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,631 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732041019618.meta 2024-11-19T18:30:19,634 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,635 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,635 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta 2024-11-19T18:30:19,635 WARN [IPC Server handler 2 on default port 44171 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta has not been closed. 
Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-19T18:30:19,635 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta after 0ms 2024-11-19T18:30:19,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741896_1081 (size=14660) 2024-11-19T18:30:19,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741896_1081 (size=14660) 2024-11-19T18:30:19,639 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/feb253fc6d9d4c6494c8eab369af07b4 2024-11-19T18:30:19,642 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41525:41525),(127.0.0.1/127.0.0.1:37267:37267)] 2024-11-19T18:30:19,642 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta is not closed yet, will try archiving it next time 2024-11-19T18:30:19,647 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/.tmp/info/feb253fc6d9d4c6494c8eab369af07b4 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/feb253fc6d9d4c6494c8eab369af07b4 2024-11-19T18:30:19,653 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/feb253fc6d9d4c6494c8eab369af07b4, entries=9, sequenceid=78, filesize=14.3 K 2024-11-19T18:30:19,654 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c2b57c628ca9f719f9c427e72d9c8d20 in 38ms, sequenceid=78, compaction requested=true 2024-11-19T18:30:19,655 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0, 
hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f] to archive 2024-11-19T18:30:19,656 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T18:30:19,658 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/b592d6259fea45f290b0a4320c6bd9e0 2024-11-19T18:30:19,659 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/ae540b4e208243cd892f45cb54e5a298 2024-11-19T18:30:19,661 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/1fee078fcdba4d0b9fd6b1c18877a2bd 2024-11-19T18:30:19,662 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf to 
hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/434dcbeb64b44ee2b9bf6823fc264acf 2024-11-19T18:30:19,663 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/c287b8cd4ca64436806b810fdb0374cb 2024-11-19T18:30:19,665 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/info/fbcdfeddaef24696ac61b6006a98124f 2024-11-19T18:30:19,665 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30db5f576be8:46573 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-19T18:30:19,665 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b592d6259fea45f290b0a4320c6bd9e0=10347, ae540b4e208243cd892f45cb54e5a298=12506, 1fee078fcdba4d0b9fd6b1c18877a2bd=17994, 434dcbeb64b44ee2b9bf6823fc264acf=6027, c287b8cd4ca64436806b810fdb0374cb=6027, fbcdfeddaef24696ac61b6006a98124f=6027] 2024-11-19T18:30:19,668 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/info/b2dc768e6791462fa57a087ea9ab1f38 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20./info:regioninfo/1732040983136/Put/seqid=0 2024-11-19T18:30:19,670 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,671 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:19,671 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741897_1083 2024-11-19T18:30:19,671 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:19,672 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c2b57c628ca9f719f9c427e72d9c8d20/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-19T18:30:19,672 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 
2024-11-19T18:30:19,672 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c2b57c628ca9f719f9c427e72d9c8d20: Waiting for close lock at 1732041019616Running coprocessor pre-close hooks at 1732041019616Disabling compacts and flushes for region at 1732041019616Disabling writes for close at 1732041019616Obtaining lock to block concurrent updates at 1732041019617 (+1 ms)Preparing flush snapshotting stores in c2b57c628ca9f719f9c427e72d9c8d20 at 1732041019617Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732041019617Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. at 1732041019618 (+1 ms)Flushing c2b57c628ca9f719f9c427e72d9c8d20/info: creating writer at 1732041019618Flushing c2b57c628ca9f719f9c427e72d9c8d20/info: appending metadata at 1732041019622 (+4 ms)Flushing c2b57c628ca9f719f9c427e72d9c8d20/info: closing flushed file at 1732041019622Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7444a035: reopening flushed file at 1732041019646 (+24 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c2b57c628ca9f719f9c427e72d9c8d20 in 38ms, sequenceid=78, compaction requested=true at 1732041019654 (+8 ms)Writing region close event to WAL at 1732041019667 (+13 ms)Running coprocessor post-close hooks at 1732041019672 (+5 ms)Closed at 1732041019672 2024-11-19T18:30:19,673 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732040982637.c2b57c628ca9f719f9c427e72d9c8d20. 2024-11-19T18:30:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741898_1084 (size=7089) 2024-11-19T18:30:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741898_1084 (size=7089) 2024-11-19T18:30:19,683 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/info/b2dc768e6791462fa57a087ea9ab1f38 2024-11-19T18:30:19,706 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/ns/99b1902c3ba648e78bffdaa9636201d1 is 43, key is default/ns:d/1732040982283/Put/seqid=0 2024-11-19T18:30:19,707 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,708 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK], DatanodeInfoWithStorage[127.0.0.1:33555,DS-1f7d7a36-4a06-428f-b2eb-c02767de7ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK]) is bad. 2024-11-19T18:30:19,708 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741899_1085 2024-11-19T18:30:19,708 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35317,DS-cc2b32c9-63f5-4c72-aded-2dc717d6dd82,DISK] 2024-11-19T18:30:19,709 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:19,710 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1554420387-172.17.0.2-1732040980060:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK], DatanodeInfoWithStorage[127.0.0.1:33519,DS-bb6bd24d-f684-4186-ac36-152d27a55a13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK]) is bad. 
2024-11-19T18:30:19,710 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1554420387-172.17.0.2-1732040980060:blk_1073741900_1086 2024-11-19T18:30:19,710 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41507,DS-ec718a4e-5766-4d62-a074-a2b7de38b0ca,DISK] 2024-11-19T18:30:19,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741901_1087 (size=5153) 2024-11-19T18:30:19,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741901_1087 (size=5153) 2024-11-19T18:30:19,716 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/ns/99b1902c3ba648e78bffdaa9636201d1 2024-11-19T18:30:19,738 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/table/5bf0c7fce8ea457aa5fcd78078e84bcc is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732040983154/Put/seqid=0 2024-11-19T18:30:19,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741902_1088 (size=5424) 2024-11-19T18:30:19,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741902_1088 (size=5424) 2024-11-19T18:30:19,746 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/table/5bf0c7fce8ea457aa5fcd78078e84bcc 2024-11-19T18:30:19,753 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/info/b2dc768e6791462fa57a087ea9ab1f38 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/info/b2dc768e6791462fa57a087ea9ab1f38 2024-11-19T18:30:19,759 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/info/b2dc768e6791462fa57a087ea9ab1f38, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T18:30:19,760 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/ns/99b1902c3ba648e78bffdaa9636201d1 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/ns/99b1902c3ba648e78bffdaa9636201d1 2024-11-19T18:30:19,766 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/ns/99b1902c3ba648e78bffdaa9636201d1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T18:30:19,767 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/.tmp/table/5bf0c7fce8ea457aa5fcd78078e84bcc as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/table/5bf0c7fce8ea457aa5fcd78078e84bcc 2024-11-19T18:30:19,772 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/table/5bf0c7fce8ea457aa5fcd78078e84bcc, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T18:30:19,774 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false 2024-11-19T18:30:19,779 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T18:30:19,779 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:30:19,779 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:30:19,780 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041019617Running coprocessor pre-close hooks at 1732041019617Disabling compacts and flushes for region at 1732041019617Disabling writes for close at 1732041019617Obtaining lock to block concurrent updates at 1732041019617Preparing flush snapshotting stores in 1588230740 at 1732041019617Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732041019618 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732041019643 (+25 ms)Flushing 1588230740/info: creating writer at 1732041019643Flushing 1588230740/info: appending metadata at 1732041019668 (+25 ms)Flushing 1588230740/info: closing flushed file at 1732041019668Flushing 1588230740/ns: creating writer at 1732041019689 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732041019705 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732041019705Flushing 1588230740/table: creating writer at 1732041019722 (+17 ms)Flushing 1588230740/table: appending metadata at 1732041019737 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732041019737Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d601b7c: reopening flushed file at 1732041019752 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a8b2b06: 
reopening flushed file at 1732041019759 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41b55d0: reopening flushed file at 1732041019766 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1732041019774 (+8 ms)Writing region close event to WAL at 1732041019775 (+1 ms)Running coprocessor post-close hooks at 1732041019779 (+4 ms)Closed at 1732041019779 2024-11-19T18:30:19,780 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:30:19,788 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.1732041003543 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C34263%2C1732040981107.1732041003543 2024-11-19T18:30:19,817 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,34263,1732040981107; all regions closed. 2024-11-19T18:30:19,818 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,818 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,818 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,818 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,819 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:19,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741894_1079 (size=825) 2024-11-19T18:30:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741894_1079 (size=825) 2024-11-19T18:30:20,549 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T18:30:20,549 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T18:30:20,598 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:20,620 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T18:30:20,621 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T18:30:21,554 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:21,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T18:30:21,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:30:21,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:30:22,395 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate 
StoreFileTracker! 2024-11-19T18:30:22,395 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T18:30:22,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:30:22,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741835_1011 (size=393) 2024-11-19T18:30:23,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741875_1058 (size=12911) 2024-11-19T18:30:23,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 after 4002ms 2024-11-19T18:30:23,636 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta after 4001ms 2024-11-19T18:30:23,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:30:24,619 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T18:30:24,621 DEBUG [RS:1;30db5f576be8:45935 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs 2024-11-19T18:30:24,621 INFO [RS:1;30db5f576be8:45935 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C45935%2C1732040982421:(num 1732040982738) 2024-11-19T18:30:24,621 DEBUG [RS:1;30db5f576be8:45935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:24,621 INFO [RS:1;30db5f576be8:45935 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:24,621 INFO [RS:1;30db5f576be8:45935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:30:24,622 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:30:24,622 INFO [RS:1;30db5f576be8:45935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45935 2024-11-19T18:30:24,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:30:24,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,45935,1732040982421 2024-11-19T18:30:24,624 INFO [RS:1;30db5f576be8:45935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:30:24,625 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,45935,1732040982421] 2024-11-19T18:30:24,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:24,628 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,45935,1732040982421 already deleted, retry=false 2024-11-19T18:30:24,628 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,45935,1732040982421 expired; onlineServers=1 2024-11-19T18:30:24,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:24,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:24,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45935-0x101317de4a00002, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:24,726 INFO [RS:1;30db5f576be8:45935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:30:24,726 INFO [RS:1;30db5f576be8:45935 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,45935,1732040982421; zookeeper connection closed. 2024-11-19T18:30:24,726 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e 2024-11-19T18:30:24,819 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T18:30:24,824 DEBUG [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs 2024-11-19T18:30:24,824 INFO [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C34263%2C1732040981107.meta:.meta(num 1732041019618) 2024-11-19T18:30:24,825 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741889_1073 (size=14682) 2024-11-19T18:30:24,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741889_1073 (size=14682) 2024-11-19T18:30:24,830 DEBUG [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C34263%2C1732040981107:(num 1732041019379) 2024-11-19T18:30:24,830 DEBUG [RS:0;30db5f576be8:34263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:30:24,830 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T18:30:24,830 INFO [RS:0;30db5f576be8:34263 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34263 2024-11-19T18:30:24,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,34263,1732040981107 2024-11-19T18:30:24,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:30:24,833 INFO [RS:0;30db5f576be8:34263 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:30:24,834 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,34263,1732040981107] 2024-11-19T18:30:24,836 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,34263,1732040981107 already deleted, retry=false 2024-11-19T18:30:24,836 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,34263,1732040981107 expired; onlineServers=0 2024-11-19T18:30:24,837 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,46573,1732040981038' ***** 2024-11-19T18:30:24,837 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:30:24,837 DEBUG [M:0;30db5f576be8:46573 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:30:24,837 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T18:30:24,837 DEBUG [M:0;30db5f576be8:46573 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:30:24,837 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040981428 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732040981428,5,FailOnTimeoutGroup] 2024-11-19T18:30:24,837 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040981428 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732040981428,5,FailOnTimeoutGroup] 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:30:24,837 DEBUG [M:0;30db5f576be8:46573 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:30:24,837 INFO [M:0;30db5f576be8:46573 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:30:24,838 INFO [M:0;30db5f576be8:46573 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:30:24,838 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:30:24,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:30:24,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:24,841 DEBUG [M:0;30db5f576be8:46573 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-19T18:30:24,841 DEBUG [M:0;30db5f576be8:46573 {}] master.ActiveMasterManager(353): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-19T18:30:24,842 INFO [M:0;30db5f576be8:46573 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/.lastflushedseqids 2024-11-19T18:30:24,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741903_1089 (size=130) 2024-11-19T18:30:24,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741903_1089 (size=130) 2024-11-19T18:30:24,850 INFO [M:0;30db5f576be8:46573 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:30:24,850 INFO [M:0;30db5f576be8:46573 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:30:24,850 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-19T18:30:24,850 INFO [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:24,850 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:24,851 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:30:24,851 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:24,851 INFO [M:0;30db5f576be8:46573 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-19T18:30:24,875 DEBUG [M:0;30db5f576be8:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0a10e2e1307f475e8e3bc864f7dd4454 is 82, key is hbase:meta,,1/info:regioninfo/1732040982208/Put/seqid=0 2024-11-19T18:30:24,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741904_1090 (size=5672) 2024-11-19T18:30:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741904_1090 (size=5672) 2024-11-19T18:30:24,880 INFO [M:0;30db5f576be8:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0a10e2e1307f475e8e3bc864f7dd4454 2024-11-19T18:30:24,901 DEBUG [M:0;30db5f576be8:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/680c30764a9b4983ba8fe6372e1830f1 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732040983160/Put/seqid=0 2024-11-19T18:30:24,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741905_1091 (size=6254) 2024-11-19T18:30:24,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741905_1091 (size=6254) 2024-11-19T18:30:24,907 INFO [M:0;30db5f576be8:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/680c30764a9b4983ba8fe6372e1830f1 2024-11-19T18:30:24,912 INFO [M:0;30db5f576be8:46573 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 680c30764a9b4983ba8fe6372e1830f1 2024-11-19T18:30:24,927 DEBUG [M:0;30db5f576be8:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/937c2a08a16840caaf70d004b4a300ea is 69, key is 
30db5f576be8,34263,1732040981107/rs:state/1732040981503/Put/seqid=0 2024-11-19T18:30:24,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741906_1092 (size=5224) 2024-11-19T18:30:24,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741906_1092 (size=5224) 2024-11-19T18:30:24,932 INFO [M:0;30db5f576be8:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/937c2a08a16840caaf70d004b4a300ea 2024-11-19T18:30:24,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:24,934 INFO [RS:0;30db5f576be8:34263 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:30:24,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34263-0x101317de4a00001, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:24,935 INFO [RS:0;30db5f576be8:34263 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,34263,1732040981107; zookeeper connection closed. 2024-11-19T18:30:24,935 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b29dc54 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b29dc54 2024-11-19T18:30:24,935 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-19T18:30:24,952 DEBUG [M:0;30db5f576be8:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fdd468f7cf4f4eccb2c350a02f79f153 is 52, key is load_balancer_on/state:d/1732040982372/Put/seqid=0 2024-11-19T18:30:24,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741907_1093 (size=5056) 2024-11-19T18:30:24,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741907_1093 (size=5056) 2024-11-19T18:30:24,958 INFO [M:0;30db5f576be8:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fdd468f7cf4f4eccb2c350a02f79f153 2024-11-19T18:30:24,964 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0a10e2e1307f475e8e3bc864f7dd4454 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0a10e2e1307f475e8e3bc864f7dd4454 2024-11-19T18:30:24,969 INFO [M:0;30db5f576be8:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0a10e2e1307f475e8e3bc864f7dd4454, entries=8, sequenceid=60, filesize=5.5 K 2024-11-19T18:30:24,970 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/680c30764a9b4983ba8fe6372e1830f1 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/680c30764a9b4983ba8fe6372e1830f1 2024-11-19T18:30:24,975 INFO [M:0;30db5f576be8:46573 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 680c30764a9b4983ba8fe6372e1830f1 2024-11-19T18:30:24,975 INFO [M:0;30db5f576be8:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/680c30764a9b4983ba8fe6372e1830f1, entries=6, sequenceid=60, filesize=6.1 K 2024-11-19T18:30:24,976 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/937c2a08a16840caaf70d004b4a300ea as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/937c2a08a16840caaf70d004b4a300ea 2024-11-19T18:30:24,981 INFO [M:0;30db5f576be8:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/937c2a08a16840caaf70d004b4a300ea, entries=2, sequenceid=60, filesize=5.1 K 2024-11-19T18:30:24,984 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fdd468f7cf4f4eccb2c350a02f79f153 as hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fdd468f7cf4f4eccb2c350a02f79f153 2024-11-19T18:30:24,990 INFO [M:0;30db5f576be8:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fdd468f7cf4f4eccb2c350a02f79f153, entries=1, sequenceid=60, filesize=4.9 K 2024-11-19T18:30:24,991 INFO [M:0;30db5f576be8:46573 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=60, compaction requested=false 2024-11-19T18:30:24,997 INFO [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:30:24,997 DEBUG [M:0;30db5f576be8:46573 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041024850Disabling compacts and flushes for region at 1732041024850Disabling writes for close at 1732041024851 (+1 ms)Obtaining lock to block concurrent updates at 1732041024851Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732041024851Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1732041024851Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732041024852 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732041024852Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732041024874 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732041024874Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732041024886 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732041024901 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732041024901Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732041024912 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732041024927 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732041024927Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732041024938 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732041024951 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732041024951Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42e44cb2: reopening flushed file at 1732041024963 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1004f474: reopening flushed file at 1732041024970 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21e6f0b8: reopening flushed file at 1732041024975 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f115544: reopening flushed file at 1732041024982 (+7 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=60, compaction requested=false at 1732041024991 (+9 ms)Writing region close event to WAL at 1732041024996 (+5 ms)Closed at 1732041024996 2024-11-19T18:30:24,998 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,998 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:24,998 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:25,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741888_1071 (size=1045) 2024-11-19T18:30:25,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33519 is added to blk_1073741888_1071 (size=1045) 2024-11-19T18:30:25,212 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:30:25,226 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:25,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:25,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:30:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33901 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:30:25,884 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@55fe52b3 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35317,null,null]) java.net.ConnectException: Call From 30db5f576be8/172.17.0.2 to localhost:41019 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T18:30:26,435 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/WALs/30db5f576be8,46573,1732040981038/30db5f576be8%2C46573%2C1732040981038.1732040981264 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/oldWALs/30db5f576be8%2C46573%2C1732040981038.1732040981264 2024-11-19T18:30:26,438 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/MasterData/oldWALs/30db5f576be8%2C46573%2C1732040981038.1732040981264 to hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/oldWALs/30db5f576be8%2C46573%2C1732040981038.1732040981264$masterlocalwal$ 2024-11-19T18:30:26,438 INFO [M:0;30db5f576be8:46573 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T18:30:26,438 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T18:30:26,439 INFO [M:0;30db5f576be8:46573 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46573 2024-11-19T18:30:26,439 INFO [M:0;30db5f576be8:46573 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:30:26,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:26,541 INFO [M:0;30db5f576be8:46573 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:30:26,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x101317de4a00000, quorum=127.0.0.1:60839, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:26,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330b5e2a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:26,543 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13c2f5a4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:26,543 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:26,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@633469fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:26,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@3aa56449{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:26,545 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T18:30:26,545 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:26,545 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:35317,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:41019 , LocalHost:localPort 30db5f576be8/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T18:30:26,545 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:26,545 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid 9cea56a8-0f68-4292-b072-5e8ede8ccc4a) service to localhost/127.0.0.1:44171 2024-11-19T18:30:26,546 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data3/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:26,546 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data4/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:26,547 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(293): Failed to updateBlock (newblock=BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1078, datanode=DatanodeInfoWithStorage[127.0.0.1:33519,null,null]) org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Replica does not exist BP-1554420387-172.17.0.2-1732040980060:1073741837 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.getReplicaInfo(FsDatasetImpl.java:897) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.getStorageUuidForLock(FsDatasetImpl.java:905) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.updateReplicaUnderRecovery(FsDatasetImpl.java:3093) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode.updateReplicaUnderRecovery(DataNode.java:3537) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.updateReplicaUnderRecovery(BlockRecoveryWorker.java:88) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.access$700(BlockRecoveryWorker.java:71) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.syncBlock(BlockRecoveryWorker.java:289) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:183) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:26,547 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:26,547 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33519,null,null], DatanodeInfoWithStorage[127.0.0.1:35317,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: Cannot recover BP-1554420387-172.17.0.2-1732040980060:blk_1073741837_1013, the following datanodes failed: [DatanodeInfoWithStorage[127.0.0.1:33519,null,null]] 2024-11-19T18:30:26,547 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33519,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1554420387-172.17.0.2-1732040980060 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:26,547 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35317,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1554420387-172.17.0.2-1732040980060 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:26,547 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a5ae80f {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33519,null,null], DatanodeInfoWithStorage[127.0.0.1:35317,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1554420387-172.17.0.2-1732040980060:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33519,null,null], DatanodeInfoWithStorage[127.0.0.1:35317,null,null]] 2024-11-19T18:30:26,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1545e1f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:26,549 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@219c70cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:26,549 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:26,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7feb24a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:26,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f9b72e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:26,551 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:26,551 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:30:26,551 WARN [BP-1554420387-172.17.0.2-1732040980060 heartbeating to localhost/127.0.0.1:44171 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554420387-172.17.0.2-1732040980060 (Datanode Uuid a8a8c18e-135e-4b5d-ad0c-15901bab7072) service to localhost/127.0.0.1:44171 2024-11-19T18:30:26,551 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:26,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data5/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:26,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/cluster_4a8a5b06-20c2-f5c3-2472-339301fa7424/data/data6/current/BP-1554420387-172.17.0.2-1732040980060 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:26,552 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:26,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e21db3f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:30:26,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17c48ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:26,558 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:26,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46889226{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:26,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8247472{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:26,566 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:30:26,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:30:26,602 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44171 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff9acbef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44171 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44171 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44171 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44171 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44171 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44171 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44171 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff9acbef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44171 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44171 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44171 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=213 (was 256), ProcessCount=11 (was 11), AvailableMemoryMB=7149 (was 7426) 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=213, ProcessCount=11, AvailableMemoryMB=7150 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.log.dir so I do NOT create it in target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4b153f7-5941-bbb9-e18f-6fe3c73ec47f/hadoop.tmp.dir so I do NOT create it in target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7, deleteOnExit=true 2024-11-19T18:30:26,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/test.cache.data in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:30:26,610 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:30:26,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:30:26,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:30:26,625 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:30:26,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:26,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:26,696 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:26,701 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:26,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:26,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:26,702 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:30:26,702 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:26,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e526681{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:26,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24710539{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:26,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45a54d9c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-33337-hadoop-hdfs-3_4_1-tests_jar-_-any-10399482329967340012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:30:26,833 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@421a8f73{HTTP/1.1, (http/1.1)}{localhost:33337} 2024-11-19T18:30:26,833 INFO [Time-limited test {}] server.Server(415): Started @150216ms 2024-11-19T18:30:26,847 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:30:26,944 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:26,947 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:26,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:26,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:26,948 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:30:26,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61549df4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:26,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fd8c23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:27,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1cd8a5ec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-40141-hadoop-hdfs-3_4_1-tests_jar-_-any-8449721148942180868/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:27,070 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66c0323e{HTTP/1.1, (http/1.1)}{localhost:40141} 2024-11-19T18:30:27,071 INFO [Time-limited test {}] server.Server(415): Started @150453ms 2024-11-19T18:30:27,072 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:30:27,109 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:27,113 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:27,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:27,114 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:27,114 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:30:27,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49ef22be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:27,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232fa1ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:27,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@538e4271{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-38771-hadoop-hdfs-3_4_1-tests_jar-_-any-4983589448568157466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:27,240 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4360f0f4{HTTP/1.1, (http/1.1)}{localhost:38771} 2024-11-19T18:30:27,241 INFO [Time-limited test {}] server.Server(415): Started @150623ms 2024-11-19T18:30:27,242 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:30:27,242 WARN [Thread-1182 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data1/current/BP-1402508832-172.17.0.2-1732041026642/current, will proceed with Du for space computation calculation, 2024-11-19T18:30:27,242 WARN [Thread-1183 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data2/current/BP-1402508832-172.17.0.2-1732041026642/current, will proceed with Du for space computation calculation, 2024-11-19T18:30:27,259 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:30:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84c5d11cd013a11f with lease ID 0x8faebf225cee07d1: Processing first storage report for DS-59a3f465-3acf-4467-8d03-c261fe36cdfa from datanode DatanodeRegistration(127.0.0.1:41671, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35613, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642) 2024-11-19T18:30:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84c5d11cd013a11f with lease ID 0x8faebf225cee07d1: from storage DS-59a3f465-3acf-4467-8d03-c261fe36cdfa node DatanodeRegistration(127.0.0.1:41671, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35613, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84c5d11cd013a11f with lease ID 0x8faebf225cee07d1: Processing first storage report for DS-c9fcb9ea-7714-404e-a095-2693abfc2538 from datanode DatanodeRegistration(127.0.0.1:41671, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35613, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642) 2024-11-19T18:30:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84c5d11cd013a11f with lease ID 0x8faebf225cee07d1: from storage DS-c9fcb9ea-7714-404e-a095-2693abfc2538 node DatanodeRegistration(127.0.0.1:41671, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35613, infoSecurePort=0, ipcPort=46713, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:27,335 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data3/current/BP-1402508832-172.17.0.2-1732041026642/current, will proceed with Du for space computation calculation, 2024-11-19T18:30:27,335 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data4/current/BP-1402508832-172.17.0.2-1732041026642/current, will proceed with Du for space computation calculation, 2024-11-19T18:30:27,352 WARN [Thread-1197 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:30:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ade3073eb769553 with lease ID 0x8faebf225cee07d2: Processing first storage report for DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f from datanode DatanodeRegistration(127.0.0.1:46297, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36039, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642) 2024-11-19T18:30:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ade3073eb769553 with lease ID 0x8faebf225cee07d2: from storage DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f node DatanodeRegistration(127.0.0.1:46297, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36039, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ade3073eb769553 with lease ID 0x8faebf225cee07d2: Processing first storage report for DS-53026cb4-3f98-49a5-bc3c-80e46a27a555 from datanode DatanodeRegistration(127.0.0.1:46297, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36039, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642) 2024-11-19T18:30:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ade3073eb769553 with lease ID 0x8faebf225cee07d2: from storage DS-53026cb4-3f98-49a5-bc3c-80e46a27a555 node DatanodeRegistration(127.0.0.1:46297, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36039, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:27,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2 2024-11-19T18:30:27,370 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/zookeeper_0, clientPort=64240, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:30:27,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64240 2024-11-19T18:30:27,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,373 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:30:27,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:30:27,382 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780 with version=8 2024-11-19T18:30:27,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:30:27,385 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:30:27,385 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:30:27,386 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44045 2024-11-19T18:30:27,387 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44045 connecting to ZooKeeper ensemble=127.0.0.1:64240 2024-11-19T18:30:27,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:440450x0, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:30:27,392 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44045-0x101317e99b90000 connected 2024-11-19T18:30:27,411 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:27,414 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780, hbase.cluster.distributed=false 2024-11-19T18:30:27,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:30:27,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44045 2024-11-19T18:30:27,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44045 2024-11-19T18:30:27,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44045 2024-11-19T18:30:27,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44045 2024-11-19T18:30:27,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44045 2024-11-19T18:30:27,434 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:30:27,434 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:30:27,435 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:30:27,435 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35573 2024-11-19T18:30:27,436 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35573 connecting to ZooKeeper ensemble=127.0.0.1:64240 2024-11-19T18:30:27,437 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,439 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355730x0, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:30:27,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:355730x0, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:27,443 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35573-0x101317e99b90001 connected 2024-11-19T18:30:27,443 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:30:27,444 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:30:27,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:30:27,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:30:27,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35573 2024-11-19T18:30:27,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35573 2024-11-19T18:30:27,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35573 2024-11-19T18:30:27,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35573 2024-11-19T18:30:27,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35573 2024-11-19T18:30:27,458 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:44045 2024-11-19T18:30:27,458 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:30:27,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:30:27,462 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:30:27,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,464 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:30:27,465 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,44045,1732041027384 from backup master directory 2024-11-19T18:30:27,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:30:27,466 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T18:30:27,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:30:27,466 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,471 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/hbase.id] with ID: b8ae49c5-1d87-42c9-b084-b4b7e43322f1 2024-11-19T18:30:27,471 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/.tmp/hbase.id 2024-11-19T18:30:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:30:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:30:27,481 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/.tmp/hbase.id]:[hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/hbase.id] 2024-11-19T18:30:27,492 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:30:27,492 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:30:27,493 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T18:30:27,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:30:27,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:30:27,503 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:30:27,504 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:30:27,504 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:30:27,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:30:27,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:30:27,512 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store 2024-11-19T18:30:27,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:30:27,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:30:27,519 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:27,519 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:30:27,519 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041027519Disabling compacts and flushes for region at 1732041027519Disabling writes for close at 1732041027519Writing region close event to WAL at 1732041027519Closed at 1732041027519 2024-11-19T18:30:27,520 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/.initializing 2024-11-19T18:30:27,520 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,523 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C44045%2C1732041027384, suffix=, logDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384, archiveDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/oldWALs, maxLogs=10 2024-11-19T18:30:27,524 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C44045%2C1732041027384.1732041027523 2024-11-19T18:30:27,529 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 2024-11-19T18:30:27,530 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35613:35613),(127.0.0.1/127.0.0.1:36039:36039)] 2024-11-19T18:30:27,531 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:30:27,531 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:27,531 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,531 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:30:27,537 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:27,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:30:27,539 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:30:27,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:30:27,540 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:30:27,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:30:27,542 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:30:27,543 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,543 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,544 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,545 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,545 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,545 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:30:27,546 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:30:27,548 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:30:27,548 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738129, jitterRate=-0.06142066419124603}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:30:27,549 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732041027531Initializing all the Stores at 1732041027532 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041027532Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041027535 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041027535Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041027535Cleaning up temporary data from old regions at 1732041027545 (+10 ms)Region opened successfully at 1732041027549 (+4 ms) 2024-11-19T18:30:27,549 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:30:27,552 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42a2befb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:30:27,553 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:30:27,553 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:30:27,554 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:30:27,554 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:30:27,554 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:30:27,554 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:30:27,554 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:30:27,556 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:30:27,557 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:30:27,560 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:30:27,560 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:30:27,561 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:30:27,562 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:30:27,562 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:30:27,563 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:30:27,564 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:30:27,565 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:30:27,566 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:30:27,568 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:30:27,571 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:30:27,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:27,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:27,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,573 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,44045,1732041027384, sessionid=0x101317e99b90000, setting cluster-up flag (Was=false) 2024-11-19T18:30:27,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,581 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:30:27,582 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:27,591 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:30:27,591 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,44045,1732041027384 2024-11-19T18:30:27,593 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:30:27,594 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:30:27,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:30:27,595 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:30:27,595 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,44045,1732041027384 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:30:27,596 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:30:27,596 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:30:27,596 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:30:27,596 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:30:27,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:30:27,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:30:27,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041057598 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:30:27,598 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:30:27,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:30:27,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:30:27,600 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:30:27,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:30:27,600 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:30:27,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:30:27,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:30:27,601 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041027600,5,FailOnTimeoutGroup] 2024-11-19T18:30:27,601 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041027601,5,FailOnTimeoutGroup] 2024-11-19T18:30:27,601 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,601 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:30:27,601 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,601 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,601 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,601 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:30:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:30:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:30:27,609 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:30:27,609 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780 2024-11-19T18:30:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:30:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:30:27,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:27,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:30:27,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:30:27,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:27,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:30:27,625 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:30:27,625 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:27,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:30:27,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:30:27,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:27,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:27,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:30:27,628 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-19T18:30:27,628 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T18:30:27,629 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-19T18:30:27,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-19T18:30:27,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-19T18:30:27,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740 2024-11-19T18:30:27,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740 2024-11-19T18:30:27,631 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:30:27,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:30:27,632 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:30:27,633 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:30:27,635 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:30:27,636 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854311, jitterRate=0.08631356060504913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:30:27,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732041027621Initializing all the Stores at 1732041027622 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041027622Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041027622Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041027622Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041027622Cleaning up temporary data from old regions at 1732041027632 (+10 ms)Region opened successfully at 1732041027637 (+5 ms) 2024-11-19T18:30:27,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, 
disabling compactions & flushes
2024-11-19T18:30:27,637 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-19T18:30:27,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-19T18:30:27,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-19T18:30:27,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-19T18:30:27,637 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-19T18:30:27,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041027637Disabling compacts and flushes for region at 1732041027637Disabling writes for close at 1732041027637Writing region close event to WAL at 1732041027637Closed at 1732041027637
2024-11-19T18:30:27,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-19T18:30:27,639 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:30:27,639 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:30:27,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:30:27,641 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:30:27,642 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:30:27,648 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(746): ClusterId : b8ae49c5-1d87-42c9-b084-b4b7e43322f1 2024-11-19T18:30:27,649 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:30:27,651 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:30:27,651 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:30:27,653 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:30:27,654 DEBUG [RS:0;30db5f576be8:35573 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4040ae33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:30:27,666 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:35573 2024-11-19T18:30:27,666 INFO [RS:0;30db5f576be8:35573 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:30:27,666 INFO [RS:0;30db5f576be8:35573 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:30:27,666 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(832): About to register with Master. 
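
[Editor's note] The regionserver.ShutdownHook entry above records the region server installing a JVM shutdown hook so that its resources can be released on process exit. The following is a minimal standalone sketch of that plain JVM mechanism only (java.lang.Runtime API); the FakeWal resource is a hypothetical stand-in and this is not HBase's actual ShutdownHook class.

// Sketch of the JVM shutdown-hook pattern the log entry refers to.
public class ShutdownHookSketch {

    // Stand-in for a resource that must be closed on exit (e.g. a WAL writer).
    static final class FakeWal implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("closing WAL-like resource");
        }
    }

    public static void main(String[] args) {
        FakeWal wal = new FakeWal();

        // Register a named hook thread; the JVM runs it on normal exit or SIGTERM.
        Runtime.getRuntime().addShutdownHook(new Thread(wal::close, "Shutdownhook:sketch"));

        System.out.println("doing work, then exiting");
        // On JVM exit the hook above runs before the process terminates.
    }
}
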
2024-11-19T18:30:27,666 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,44045,1732041027384 with port=35573, startcode=1732041027434 2024-11-19T18:30:27,667 DEBUG [RS:0;30db5f576be8:35573 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:30:27,669 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43713, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:30:27,669 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44045 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,669 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44045 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,671 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780 2024-11-19T18:30:27,671 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33823 2024-11-19T18:30:27,671 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:30:27,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:30:27,673 DEBUG [RS:0;30db5f576be8:35573 {}] zookeeper.ZKUtil(111): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,673 WARN [RS:0;30db5f576be8:35573 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:30:27,673 INFO [RS:0;30db5f576be8:35573 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:30:27,673 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,674 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,35573,1732041027434] 2024-11-19T18:30:27,677 INFO [RS:0;30db5f576be8:35573 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:30:27,678 INFO [RS:0;30db5f576be8:35573 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:30:27,679 INFO [RS:0;30db5f576be8:35573 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:30:27,679 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
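
[Editor's note] The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is about 95% of the limit. Assuming the usual sizing factors (a global memstore fraction of roughly 0.4 of the heap and a 0.95 lower-limit factor; both constants are assumptions, not values read from this test's configuration), the two figures are consistent, as this small arithmetic sketch shows.

// Sketch of how a global memstore limit and its low-water mark could be derived.
// The 0.4 and 0.95 factors and the heap size are assumptions for illustration only.
public class MemStoreLimitSketch {
    public static void main(String[] args) {
        long heapBytes = 2200L * 1024 * 1024;  // assumed ~2.2 GB heap, so 0.4 * heap is about 880 MB
        double globalFraction = 0.4;           // assumed global memstore fraction of the heap
        double lowerLimitFraction = 0.95;      // assumed lower-limit factor

        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimitFraction);

        System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n",
                globalLimit / (1024 * 1024), lowMark / (1024 * 1024));
        // Prints roughly: globalMemStoreLimit=880 M, lowMark=836 M
    }
}
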
2024-11-19T18:30:27,679 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:30:27,680 INFO [RS:0;30db5f576be8:35573 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:30:27,680 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,680 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:30:27,681 DEBUG [RS:0;30db5f576be8:35573 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:30:27,681 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
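
[Editor's note] The ChoreService/ScheduledChore entries above (CompactionChecker every 1000 ms, CompactionThroughputTuner every 60000 ms, and so on) all follow the same periodic-task pattern. Below is a minimal sketch of that pattern using plain java.util.concurrent; it is not HBase's ChoreService or ScheduledChore code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of a periodic "chore": run a cheap check at a fixed rate and let each
// run decide whether any real work (e.g. scheduling a compaction) is needed.
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();

        // Analogous to "name=CompactionChecker, period=1000, unit=MILLISECONDS".
        pool.scheduleAtFixedRate(
                () -> System.out.println("compaction check tick"),
                0, 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(5000);   // let a few ticks run for the demo
        pool.shutdownNow();
    }
}
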
2024-11-19T18:30:27,682 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,682 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,682 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,682 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,682 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,35573,1732041027434-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:30:27,698 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:30:27,698 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,35573,1732041027434-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,698 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,698 INFO [RS:0;30db5f576be8:35573 {}] regionserver.Replication(171): 30db5f576be8,35573,1732041027434 started 2024-11-19T18:30:27,713 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:27,713 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,35573,1732041027434, RpcServer on 30db5f576be8/172.17.0.2:35573, sessionid=0x101317e99b90001 2024-11-19T18:30:27,713 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:30:27,713 DEBUG [RS:0;30db5f576be8:35573 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,713 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,35573,1732041027434' 2024-11-19T18:30:27,713 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,35573,1732041027434 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,35573,1732041027434' 2024-11-19T18:30:27,714 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:30:27,714 DEBUG 
[RS:0;30db5f576be8:35573 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:30:27,715 DEBUG [RS:0;30db5f576be8:35573 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:30:27,715 INFO [RS:0;30db5f576be8:35573 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:30:27,715 INFO [RS:0;30db5f576be8:35573 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:30:27,792 WARN [30db5f576be8:44045 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:30:27,817 INFO [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C35573%2C1732041027434, suffix=, logDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434, archiveDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs, maxLogs=32 2024-11-19T18:30:27,819 INFO [RS:0;30db5f576be8:35573 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:27,829 INFO [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:27,830 DEBUG [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36039:36039),(127.0.0.1/127.0.0.1:35613:35613)] 2024-11-19T18:30:28,042 DEBUG [30db5f576be8:44045 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:30:28,043 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,35573,1732041027434 2024-11-19T18:30:28,044 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,35573,1732041027434, state=OPENING 2024-11-19T18:30:28,046 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:30:28,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:28,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:28,049 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:30:28,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:30:28,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=30db5f576be8,35573,1732041027434}] 2024-11-19T18:30:28,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:30:28,202 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:30:28,205 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39847, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:30:28,209 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:30:28,209 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:30:28,211 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C35573%2C1732041027434.meta, suffix=.meta, logDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434, archiveDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs, maxLogs=32 2024-11-19T18:30:28,211 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta 2024-11-19T18:30:28,221 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta 2024-11-19T18:30:28,228 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35613:35613),(127.0.0.1/127.0.0.1:36039:36039)] 2024-11-19T18:30:28,233 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:30:28,233 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:30:28,234 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:30:28,234 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
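The WAL settings reported in the AbstractFSWAL(613) entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, provider FSHLogProvider) map to standard HBase configuration keys. A minimal sketch under that assumption follows; the property names are taken from recent HBase releases and should be verified against the hbase-default.xml of this build, and the class name WalConfigSketch is illustrative only, not code from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  // Returns a Configuration whose WAL-related settings mirror the values logged above.
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // 256 MB WAL block size, as reported by AbstractFSWAL above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size is block size * multiplier; 0.5 yields the 128 MB rollsize in the log.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on un-archived WAL files (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    // "filesystem" selects FSHLogProvider, the provider instantiated in this run.
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}
```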
2024-11-19T18:30:28,234 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:30:28,234 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:28,234 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:30:28,234 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:30:28,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:30:28,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:30:28,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:28,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:28,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:30:28,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:30:28,239 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:28,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:28,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:30:28,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:30:28,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:28,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:30:28,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:30:28,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:30:28,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:28,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
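The CompactionConfiguration(183) dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize:128 MB, major period 604800000 with 0.5 jitter) correspond to well-known compaction settings. The sketch below shows the configuration keys these values are typically derived from; the key names are assumptions based on current HBase documentation, not something the log itself states.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  // Reproduces the compaction parameters printed for each column family above.
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio 5.000000
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    // Major compactions every 7 days with 50% jitter, matching the dump above.
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}
```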
2024-11-19T18:30:28,243 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:30:28,244 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740 2024-11-19T18:30:28,246 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740 2024-11-19T18:30:28,248 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:30:28,248 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:30:28,249 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:30:28,251 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:30:28,252 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739379, jitterRate=-0.0598314106464386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:30:28,252 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:30:28,253 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732041028234Writing region info on filesystem at 1732041028234Initializing all the Stores at 1732041028235 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041028235Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041028236 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041028236Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041028236Cleaning up temporary data from old regions at 1732041028248 (+12 ms)Running coprocessor post-open hooks at 1732041028252 (+4 ms)Region opened successfully at 1732041028253 (+1 ms) 2024-11-19T18:30:28,254 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732041028202 2024-11-19T18:30:28,257 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:30:28,258 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:30:28,258 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,35573,1732041027434 2024-11-19T18:30:28,259 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,35573,1732041027434, state=OPEN 2024-11-19T18:30:28,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:30:28,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:30:28,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:30:28,264 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,35573,1732041027434 2024-11-19T18:30:28,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:30:28,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:30:28,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,35573,1732041027434 in 215 msec 2024-11-19T18:30:28,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:30:28,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-19T18:30:28,272 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:30:28,272 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:30:28,274 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:30:28,274 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,35573,1732041027434, seqNum=-1] 2024-11-19T18:30:28,274 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:30:28,276 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:30:28,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 686 msec 2024-11-19T18:30:28,281 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732041028281, completionTime=-1 2024-11-19T18:30:28,281 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:30:28,281 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:30:28,283 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:30:28,283 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041088283 2024-11-19T18:30:28,283 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041148283 2024-11-19T18:30:28,283 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:44045, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-19T18:30:28,284 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-19T18:30:28,285 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-19T18:30:28,287 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.821sec
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-19T18:30:28,288 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-19T18:30:28,290 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-19T18:30:28,290 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-19T18:30:28,290 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44045,1732041027384-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
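Each "Chore ScheduledChore name=..., period=..., unit=... is enabled." entry above is emitted when a chore is handed to the ChoreService. A minimal sketch of defining and scheduling one such chore follows; ScheduledChore and ChoreService are HBase-internal utility classes, so the constructor signatures shown here are assumptions to be checked against this source tree, and DemoChore is a made-up name.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // A trivial chore; the name and period are what would appear in the log line.
  static final class DemoChore extends ScheduledChore {
    DemoChore(Stoppable stopper) {
      super("DemoChore", stopper, 60000); // period in milliseconds
    }
    @Override
    protected void chore() {
      // periodic work goes here
    }
  }

  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("demo");
    // Scheduling is what produces the "Chore ScheduledChore name=DemoChore, ... is enabled." entry.
    choreService.scheduleChore(new DemoChore(stopper));
    // ... later, during shutdown:
    choreService.shutdown();
  }
}
```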
2024-11-19T18:30:28,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57faf252, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-19T18:30:28,349 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,44045,-1 for getting cluster id
2024-11-19T18:30:28,349 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-19T18:30:28,351 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b8ae49c5-1d87-42c9-b084-b4b7e43322f1'
2024-11-19T18:30:28,351 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-19T18:30:28,351 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b8ae49c5-1d87-42c9-b084-b4b7e43322f1"
2024-11-19T18:30:28,351 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17607df8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-19T18:30:28,351 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,44045,-1]
2024-11-19T18:30:28,352 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-19T18:30:28,352 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T18:30:28,354 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-19T18:30:28,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5921b3be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-19T18:30:28,355 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-19T18:30:28,356 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,35573,1732041027434, seqNum=-1]
2024-11-19T18:30:28,356 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-19T18:30:28,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42186, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-19T18:30:28,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,44045,1732041027384
2024-11-19T18:30:28,359 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T18:30:28,362 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-19T18:30:28,362 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-19T18:30:28,362 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-19T18:30:28,362 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-19T18:30:28,363 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 30db5f576be8,44045,1732041027384
2024-11-19T18:30:28,363 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6a774343
2024-11-19T18:30:28,363 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-19T18:30:28,364 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45942, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-19T18:30:28,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-19T18:30:28,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
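The two TableDescriptorChecker warnings above are triggered by the deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) carried in the table descriptor of the create request that follows. A hedged client-side sketch of a descriptor that would produce the same warnings is shown below, using the public TableDescriptorBuilder API; it is not the test's actual code, only an illustration with the table and family names taken from the entries that follow.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallTableSketch {
  static void createSmallTable(Admin admin) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        // 768 KB max file size: far below the sanity threshold, hence the MAX_FILESIZE WARN.
        .setMaxFileSize(786432L)
        // 8 KB memstore flush size: triggers the "very frequent flushing" WARN.
        .setMemStoreFlushSize(8192L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    // Blocks until the CreateTableProcedure logged below completes.
    admin.createTable(td);
  }
}
```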
2024-11-19T18:30:28,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:30:28,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T18:30:28,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T18:30:28,368 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:30:28,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-19T18:30:28,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:30:28,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T18:30:28,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741835_1011 (size=395) 2024-11-19T18:30:28,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741835_1011 (size=395) 2024-11-19T18:30:28,380 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d21be12d123699a43e78f95ae943248c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780 2024-11-19T18:30:28,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41671 is added to blk_1073741836_1012 (size=78) 2024-11-19T18:30:28,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46297 is added to blk_1073741836_1012 (size=78) 2024-11-19T18:30:28,387 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:28,387 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing d21be12d123699a43e78f95ae943248c, disabling compactions & flushes 2024-11-19T18:30:28,387 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,387 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,387 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. after waiting 0 ms 2024-11-19T18:30:28,387 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,387 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,387 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for d21be12d123699a43e78f95ae943248c: Waiting for close lock at 1732041028387Disabling compacts and flushes for region at 1732041028387Disabling writes for close at 1732041028387Writing region close event to WAL at 1732041028387Closed at 1732041028387 2024-11-19T18:30:28,388 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T18:30:28,389 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732041028388"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732041028388"}]},"ts":"1732041028388"} 2024-11-19T18:30:28,391 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
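Once MetaTableAccessor has written the regioninfo/state cells shown above ("Added 1 regions to meta."), a client can read the assignment back with an ordinary scan of the catalog table. A minimal sketch using the public client API follows; connection setup is omitted and the class name MetaScanSketch is illustrative only.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  static void printMetaRows(Connection conn) throws Exception {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(
             new Scan().addFamily(Bytes.toBytes("info")))) {
      for (Result r : scanner) {
        // Rows include the regioninfo/state cells written by MetaTableAccessor above.
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}
```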
2024-11-19T18:30:28,392 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T18:30:28,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041028392"}]},"ts":"1732041028392"} 2024-11-19T18:30:28,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-19T18:30:28,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d21be12d123699a43e78f95ae943248c, ASSIGN}] 2024-11-19T18:30:28,395 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d21be12d123699a43e78f95ae943248c, ASSIGN 2024-11-19T18:30:28,397 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d21be12d123699a43e78f95ae943248c, ASSIGN; state=OFFLINE, location=30db5f576be8,35573,1732041027434; forceNewPlan=false, retain=false 2024-11-19T18:30:28,547 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d21be12d123699a43e78f95ae943248c, regionState=OPENING, regionLocation=30db5f576be8,35573,1732041027434 2024-11-19T18:30:28,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d21be12d123699a43e78f95ae943248c, ASSIGN because future has completed 2024-11-19T18:30:28,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d21be12d123699a43e78f95ae943248c, server=30db5f576be8,35573,1732041027434}] 2024-11-19T18:30:28,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:28,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:28,708 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,708 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d21be12d123699a43e78f95ae943248c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:30:28,709 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,709 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:30:28,709 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,709 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,710 INFO [StoreOpener-d21be12d123699a43e78f95ae943248c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,712 INFO [StoreOpener-d21be12d123699a43e78f95ae943248c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d21be12d123699a43e78f95ae943248c columnFamilyName info 2024-11-19T18:30:28,712 DEBUG [StoreOpener-d21be12d123699a43e78f95ae943248c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-19T18:30:28,712 INFO [StoreOpener-d21be12d123699a43e78f95ae943248c-1 {}] regionserver.HStore(327): Store=d21be12d123699a43e78f95ae943248c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:30:28,712 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,713 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,713 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,714 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,714 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,716 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,719 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:30:28,719 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d21be12d123699a43e78f95ae943248c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724597, jitterRate=-0.07862813770771027}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:30:28,719 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:28,720 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d21be12d123699a43e78f95ae943248c: Running coprocessor pre-open hook at 1732041028709Writing region info on filesystem at 1732041028709Initializing all the Stores at 1732041028710 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041028710Cleaning up temporary data from 
old regions at 1732041028714 (+4 ms)Running coprocessor post-open hooks at 1732041028719 (+5 ms)Region opened successfully at 1732041028720 (+1 ms) 2024-11-19T18:30:28,721 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c., pid=6, masterSystemTime=1732041028704 2024-11-19T18:30:28,724 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,724 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:28,725 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d21be12d123699a43e78f95ae943248c, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,35573,1732041027434 2024-11-19T18:30:28,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d21be12d123699a43e78f95ae943248c, server=30db5f576be8,35573,1732041027434 because future has completed 2024-11-19T18:30:28,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T18:30:28,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d21be12d123699a43e78f95ae943248c, server=30db5f576be8,35573,1732041027434 in 178 msec 2024-11-19T18:30:28,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T18:30:28,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d21be12d123699a43e78f95ae943248c, ASSIGN in 338 msec 2024-11-19T18:30:28,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T18:30:28,736 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041028736"}]},"ts":"1732041028736"} 2024-11-19T18:30:28,738 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-19T18:30:28,739 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T18:30:28,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 374 msec 2024-11-19T18:30:29,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:29,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:30,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:30,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:31,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:31,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:31,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:30:31,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T18:30:31,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T18:30:31,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-19T18:30:31,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:30:31,862 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T18:30:32,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:32,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:33,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:33,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:33,736 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:30:33,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:33,770 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T18:30:33,771 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-19T18:30:34,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:34,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:35,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:35,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:36,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:36,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:37,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:37,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:38,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44045 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:30:38,393 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-19T18:30:38,393 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-19T18:30:38,395 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T18:30:38,396 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:38,399 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c., hostname=30db5f576be8,35573,1732041027434, seqNum=2] 2024-11-19T18:30:38,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:38,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:39,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:39,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:40,401 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:40,402 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:40,402 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:40,402 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:40,402 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK], DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]) is bad. 
2024-11-19T18:30:40,402 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK], DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]) is bad. 2024-11-19T18:30:40,402 WARN [PacketResponder: BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46297] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,403 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK], DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46297,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]) is bad. 2024-11-19T18:30:40,403 WARN [PacketResponder: BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46297] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:50224 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50224 dst: /127.0.0.1:46297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:39864 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39864 dst: /127.0.0.1:41671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_857295712_22 at /127.0.0.1:39848 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39848 dst: /127.0.0.1:41671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_857295712_22 at /127.0.0.1:50186 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50186 dst: /127.0.0.1:46297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:39876 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39876 dst: /127.0.0.1:41671 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:50226 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50226 dst: /127.0.0.1:46297 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:40,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@538e4271{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:40,406 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4360f0f4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:40,406 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:40,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232fa1ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:40,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49ef22be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:40,408 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:40,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T18:30:40,408 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 202824d4-c4bf-427f-8c69-c05a8c2b3668) service to localhost/127.0.0.1:33823 2024-11-19T18:30:40,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:40,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data3/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:40,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data4/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:40,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:40,423 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:40,426 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:40,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:40,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:40,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:30:40,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@644dfd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:40,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7aa94e72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:40,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c49b856{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-37657-hadoop-hdfs-3_4_1-tests_jar-_-any-16584289109614715243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:40,548 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@431e378c{HTTP/1.1, (http/1.1)}{localhost:37657} 2024-11-19T18:30:40,548 INFO [Time-limited test {}] server.Server(415): Started @163931ms 2024-11-19T18:30:40,550 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:30:40,573 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:40,573 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:40,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:49952 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49952 dst: /127.0.0.1:41671 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,574 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:40,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_857295712_22 at /127.0.0.1:49948 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49948 dst: /127.0.0.1:41671 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:49964 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49964 dst: /127.0.0.1:41671 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:40,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1cd8a5ec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:40,577 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66c0323e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:40,577 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:40,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fd8c23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:40,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61549df4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:40,579 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:40,579 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:30:40,579 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f) service to localhost/127.0.0.1:33823 2024-11-19T18:30:40,579 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:40,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data1/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:40,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data2/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:40,581 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:40,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:40,593 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:40,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:40,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:40,594 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:30:40,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1763b2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:40,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3903d405{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:40,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:40,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:40,646 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:30:40,649 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cfad5cfca4328e3 with lease ID 0x8faebf225cee07d3: from storage DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f node DatanodeRegistration(127.0.0.1:40151, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=42089, infoSecurePort=0, ipcPort=36699, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:40,649 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cfad5cfca4328e3 with lease ID 0x8faebf225cee07d3: from storage DS-53026cb4-3f98-49a5-bc3c-80e46a27a555 node DatanodeRegistration(127.0.0.1:40151, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=42089, infoSecurePort=0, ipcPort=36699, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:40,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55b48cd0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-39525-hadoop-hdfs-3_4_1-tests_jar-_-any-5479485327824275979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:40,713 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62f6e774{HTTP/1.1, (http/1.1)}{localhost:39525} 2024-11-19T18:30:40,713 INFO [Time-limited test {}] server.Server(415): Started @164096ms 2024-11-19T18:30:40,715 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:30:40,819 WARN [Thread-1363 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:30:40,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4a1aaee1fa6c817 with lease ID 0x8faebf225cee07d4: from storage DS-59a3f465-3acf-4467-8d03-c261fe36cdfa node DatanodeRegistration(127.0.0.1:43541, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=45481, infoSecurePort=0, ipcPort=44139, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:40,822 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4a1aaee1fa6c817 with lease ID 0x8faebf225cee07d4: from storage DS-c9fcb9ea-7714-404e-a095-2693abfc2538 node DatanodeRegistration(127.0.0.1:43541, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=45481, infoSecurePort=0, ipcPort=44139, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:41,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:41,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:41,733 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-19T18:30:41,735 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-19T18:30:41,736 ERROR [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:41,737 WARN [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:41,737 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C35573%2C1732041027434:(num 1732041027818) roll requested 2024-11-19T18:30:41,737 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:41,742 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 newFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:41,743 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:41,743 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:41,743 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:41,743 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:41,743 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:41,743 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:41,744 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:41,744 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:41,744 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:41,744 WARN [IPC Server handler 3 on default port 33823 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-19T18:30:41,744 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45481:45481),(127.0.0.1/127.0.0.1:42089:42089)] 2024-11-19T18:30:41,745 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 is not closed yet, will try archiving it next time 2024-11-19T18:30:41,745 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 after 0ms 2024-11-19T18:30:42,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:42,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:42,650 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T18:30:43,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:43,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:43,748 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-19T18:30:44,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:44,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:45,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:45,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:45,745 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 after 4001ms 2024-11-19T18:30:45,751 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:45,751 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43541,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK], DatanodeInfoWithStorage[127.0.0.1:40151,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43541,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]) is bad. 
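The repeated WARN lines from Close-WAL-Writer-0 above are a retry loop: after asking the NameNode to recover the lease on the old WAL, the closer polls isFileClosed() roughly once a second (via reflection in RecoverLeaseFSUtils), and each poll fails here because the DFSClient behind that filesystem has already been shut down; the INFO line shows one such lease recovered on attempt=1 after 4001ms. As a rough, hypothetical illustration of that recover-and-poll shape only (not the actual RecoverLeaseFSUtils code, which wraps the call in reflection for Hadoop-version compatibility):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical sketch of the recover-lease-then-poll pattern behind the log lines above.
static void recoverLeaseAndWait(DistributedFileSystem dfs, Path oldWal)
    throws IOException, InterruptedException {
  long start = System.currentTimeMillis();
  boolean closed = dfs.recoverLease(oldWal);   // ask the NameNode to begin lease recovery
  while (!closed) {
    Thread.sleep(1000);                        // the log shows roughly one attempt per second
    try {
      closed = dfs.isFileClosed(oldWal);       // the call that fails above with "Filesystem closed"
    } catch (IOException e) {
      // Matches the WARN: the DFSClient was already closed, so the check fails and is retried.
    }
  }
  System.out.println("Recovered lease after " + (System.currentTimeMillis() - start) + "ms");
}

Under normal shutdown ordering the loop exits quickly; in the trace above it keeps cycling because the filesystem handle the closer holds is the one that was closed.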
2024-11-19T18:30:45,752 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:37610 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43541:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37610 dst: /127.0.0.1:43541 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:45,752 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:49822 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49822 dst: /127.0.0.1:40151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:45,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55b48cd0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:45,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62f6e774{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:45,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:45,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3903d405{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:45,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1763b2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:45,755 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:45,755 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f) service to localhost/127.0.0.1:33823 2024-11-19T18:30:45,755 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:30:45,755 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:45,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data1/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:45,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data2/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:45,756 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:45,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:45,770 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:45,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:45,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:45,771 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:30:45,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bc55163{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:45,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dcfabbb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:45,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4165df3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-40935-hadoop-hdfs-3_4_1-tests_jar-_-any-4024203113033184576/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:45,887 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32c717fb{HTTP/1.1, (http/1.1)}{localhost:40935} 2024-11-19T18:30:45,887 INFO [Time-limited test {}] server.Server(415): Started @169270ms 2024-11-19T18:30:45,888 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T18:30:45,913 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:45,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1916520685_22 at /127.0.0.1:49836 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49836 dst: /127.0.0.1:40151 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:45,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c49b856{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:45,915 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@431e378c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:30:45,915 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:30:45,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7aa94e72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:30:45,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@644dfd01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:30:45,917 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:30:45,917 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 202824d4-c4bf-427f-8c69-c05a8c2b3668) service to localhost/127.0.0.1:33823 2024-11-19T18:30:45,917 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T18:30:45,917 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:30:45,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data3/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:45,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data4/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:30:45,918 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:30:45,931 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:30:45,935 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:30:45,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:30:45,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:30:45,936 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:30:45,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4307cd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:30:45,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21404da7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:30:45,989 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:30:45,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe578da9c15e9288d with lease ID 0x8faebf225cee07d5: from storage DS-59a3f465-3acf-4467-8d03-c261fe36cdfa node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35009, infoSecurePort=0, ipcPort=36641, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:45,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe578da9c15e9288d with lease ID 0x8faebf225cee07d5: from storage DS-c9fcb9ea-7714-404e-a095-2693abfc2538 node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f, infoPort=35009, infoSecurePort=0, ipcPort=36641, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:46,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3affdf32{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/java.io.tmpdir/jetty-localhost-44365-hadoop-hdfs-3_4_1-tests_jar-_-any-10996105068925307204/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:30:46,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ebbad67{HTTP/1.1, (http/1.1)}{localhost:44365} 2024-11-19T18:30:46,060 INFO [Time-limited test {}] server.Server(415): Started @169442ms 2024-11-19T18:30:46,062 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
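The Stopped/Started Jetty contexts, the "Ending block pool service" warnings, and the block reports above are the shape of a datanode bounce in this embedded cluster: the test stops each datanode, brings it back, and the restarted node re-registers and re-reports its blocks (the "Data Nodes restarted" line follows shortly after). A minimal, hypothetical sketch of how a test can drive that, assuming the standard MiniDFSCluster test API rather than this test's exact helper methods:

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch: bounce the datanodes so that any open WAL pipeline breaks
// and the writer is forced through error recovery on the next append/sync.
static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
  cluster.restartDataNodes();   // stop and restart every datanode in the mini cluster
  cluster.waitActive();         // block until the restarted datanodes have re-registered
}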
2024-11-19T18:30:46,153 WARN [Thread-1437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:30:46,155 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60a76c53f26beb98 with lease ID 0x8faebf225cee07d6: from storage DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f node DatanodeRegistration(127.0.0.1:34217, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36963, infoSecurePort=0, ipcPort=39937, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:30:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60a76c53f26beb98 with lease ID 0x8faebf225cee07d6: from storage DS-53026cb4-3f98-49a5-bc3c-80e46a27a555 node DatanodeRegistration(127.0.0.1:34217, datanodeUuid=202824d4-c4bf-427f-8c69-c05a8c2b3668, infoPort=36963, infoSecurePort=0, ipcPort=39937, storageInfo=lv=-57;cid=testClusterID;nsid=277514293;c=1732041026642), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T18:30:46,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:46,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:47,080 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-19T18:30:47,082 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-19T18:30:47,083 ERROR [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40151,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:47,083 WARN [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40151,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:47,083 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C35573%2C1732041027434:(num 1732041041737) roll requested 2024-11-19T18:30:47,084 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:47,089 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 newFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:47,089 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:47,089 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:47,089 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:47,089 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:47,090 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:47,090 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:47,090 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40151,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
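Once appendAndSync fails with "All datanodes ... are bad", the roller's reaction is visible above: a roll is requested and the WAL is rolled onto a new file, while the old writer is handed to Close-WAL-Writer; its failed close (next lines) is what kicks off lease recovery on the old file. A hedged sketch of that reaction using the public WAL interface (the real AbstractWALRoller does this asynchronously, off the append path):

import java.io.IOException;
import org.apache.hadoop.hbase.wal.WAL;

// Hypothetical sketch: when a sync fails because the pipeline is dead,
// force a roll so subsequent appends go to a new WAL file on healthy datanodes.
static void rollOnSyncFailure(WAL wal) throws IOException {
  try {
    wal.sync();            // fails here with "All datanodes ... are bad"
  } catch (IOException e) {
    wal.rollWriter(true);  // force a roll: new writer, new block, new pipeline
  }
}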
2024-11-19T18:30:47,090 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40151,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:47,090 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:47,090 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36963:36963),(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-19T18:30:47,090 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 is not closed yet, will try archiving it next time 2024-11-19T18:30:47,090 WARN [IPC Server handler 4 on default port 33823 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-19T18:30:47,091 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 after 1ms 2024-11-19T18:30:47,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:47,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:48,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:48,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:49,092 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:49,098 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 newFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:49,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:49,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:49,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:49,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:49,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:49,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:49,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741838_1019 (size=1264) 2024-11-19T18:30:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741838_1019 (size=1264) 2024-11-19T18:30:49,101 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:36963:36963)] 2024-11-19T18:30:49,101 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 is not closed yet, will try archiving it next time 2024-11-19T18:30:49,101 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 is not closed yet, will try archiving it next time 2024-11-19T18:30:49,102 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:49,102 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:49,102 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 after 0ms 2024-11-19T18:30:49,102 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:49,112 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732041028720/Put/vlen=218/seqid=0] 2024-11-19T18:30:49,113 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732041038400/Put/vlen=1045/seqid=0] 2024-11-19T18:30:49,113 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041027818 2024-11-19T18:30:49,113 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:49,113 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:49,113 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 after 0ms 2024-11-19T18:30:49,113 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:49,116 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732041041736/Put/vlen=1045/seqid=0] 2024-11-19T18:30:49,116 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732041043749/Put/vlen=1045/seqid=0] 2024-11-19T18:30:49,116 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 2024-11-19T18:30:49,116 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:49,116 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:49,117 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 after 1ms 2024-11-19T18:30:49,117 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041047083 2024-11-19T18:30:49,120 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732041047083/Put/vlen=1045/seqid=0] 2024-11-19T18:30:49,120 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:49,120 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:49,121 WARN [IPC Server handler 1 on default port 33823 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-19T18:30:49,121 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 after 1ms 2024-11-19T18:30:49,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:49,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:49,992 WARN [ResponseProcessor for block BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:49,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_857295712_22 at /127.0.0.1:33868 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33868 dst: /127.0.0.1:44591 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44591 remote=/127.0.0.1:33868]. Total timeout mills is 60000, 59105 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:30:49,993 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 block BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK], DatanodeInfoWithStorage[127.0.0.1:34217,DS-8ef549a8-d837-4cfe-b7ee-a9f39f964c0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44591,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]) is bad. 2024-11-19T18:30:49,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_857295712_22 at /127.0.0.1:44048 [Receiving block BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44048 dst: /127.0.0.1:34217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T18:30:49,994 WARN [DataStreamer for file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 block BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
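The DataStreamer exception above is the writer side of a race: lease recovery was already triggered on the still-open WAL file (see "Lease recovery is in progress. RecoveryId = 1022" earlier for the same block blk_1073741839_1021), so when the original output stream later tries to rebuild its write pipeline, the NameNode rejects updateBlockForPipeline because the block is UNDER_RECOVERY rather than UNDER_CONSTRUCTION. As a minimal, hedged sketch of the generic client-side reaction that the roll and "close old writer failed" messages later in this log suggest (the class and method names below are invented for illustration; this is not FSHLog code):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: on an append/flush failure, close the broken
// writer best-effort and continue in a fresh file, mirroring the WAL roll
// sequence recorded further down in this log.
final class RollOnPipelineFailureSketch {
  private final FileSystem fs;
  private FSDataOutputStream out;

  RollOnPipelineFailureSketch(FileSystem fs, Path firstFile) throws IOException {
    this.fs = fs;
    this.out = fs.create(firstFile);
  }

  void append(byte[] edit, Path nextFileIfBroken) throws IOException {
    try {
      out.write(edit);
      out.hflush(); // pipeline failures such as "Unexpected BlockUCState" surface here
    } catch (IOException e) {
      try {
        out.close(); // may itself fail, cf. "close old writer failed" below
      } catch (IOException ignored) {
        // best effort; the old block is finalized by lease/block recovery instead
      }
      out = fs.create(nextFileIfBroken);
      out.write(edit);
      out.hflush();
    }
  }
}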
2024-11-19T18:30:49,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741839_1022 (size=85) 2024-11-19T18:30:50,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741839_1022 (size=85) 2024-11-19T18:30:50,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:50,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:50,991 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
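The repeated "Failed invocation ... Filesystem closed" warnings above come from a Close-WAL-Writer retry loop polling lease recovery against a DFS client that has already been shut down; the reflective isFileClosed() call in those frames is what throws, which is why the WARN line reports an InvocationTargetException wrapping the IOException. A rough sketch, using only standard Hadoop client calls visible in the stack frames (recoverLease and isFileClosed on DistributedFileSystem) and not the actual RecoverLeaseFSUtils implementation — the 1s back-off and the timeout parameter are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Rough sketch only: ask the NameNode to start lease recovery, then poll
// isFileClosed() until the file reports closed or a caller-chosen deadline
// passes.
public final class LeaseRecoveryPollerSketch {
  static boolean recoverLease(Configuration conf, Path walFile, long timeoutMs)
      throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(walFile); // attempt=0 in the log above
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      // isFileClosed() is the call that fails with "Filesystem closed" in the
      // warnings above once the owning DFS client has been shut down.
      recovered = dfs.isFileClosed(walFile) || dfs.recoverLease(walFile);
    }
    return recovered;
  }
}

The "Recovered lease, attempt=1 ... after 4002ms" entries that follow are the successful iterations of this kind of polling loop once block recovery completes.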
2024-11-19T18:30:51,092 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041041737 after 4002ms 2024-11-19T18:30:51,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:51,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:52,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:52,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:53,122 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 after 4001ms 2024-11-19T18:30:53,122 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:53,125 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:53,126 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d21be12d123699a43e78f95ae943248c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-19T18:30:53,126 ERROR [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,127 WARN [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,127 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C35573%2C1732041027434:(num 1732041049092) roll requested 2024-11-19T18:30:53,127 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.1732041053127 2024-11-19T18:30:53,132 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 newFile=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041053127 2024-11-19T18:30:53,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,133 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041053127 2024-11-19T18:30:53,133 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,134 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1402508832-172.17.0.2-1732041026642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:53,134 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:53,134 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 after 0ms 2024-11-19T18:30:53,135 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.1732041049092 to hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs/30db5f576be8%2C35573%2C1732041027434.1732041049092 2024-11-19T18:30:53,135 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:36963:36963)] 2024-11-19T18:30:53,151 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/.tmp/info/947d8ea910484224b6ac997e31cb01a5 is 1080, key is row1002/info:/1732041038400/Put/seqid=0 2024-11-19T18:30:53,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741841_1024 (size=9270) 2024-11-19T18:30:53,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741841_1024 (size=9270) 2024-11-19T18:30:53,157 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/.tmp/info/947d8ea910484224b6ac997e31cb01a5 2024-11-19T18:30:53,167 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/.tmp/info/947d8ea910484224b6ac997e31cb01a5 as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/info/947d8ea910484224b6ac997e31cb01a5 2024-11-19T18:30:53,173 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/info/947d8ea910484224b6ac997e31cb01a5, entries=4, sequenceid=8, filesize=9.1 K 2024-11-19T18:30:53,174 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for d21be12d123699a43e78f95ae943248c in 48ms, sequenceid=8, compaction requested=false 2024-11-19T18:30:53,174 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
d21be12d123699a43e78f95ae943248c: 2024-11-19T18:30:53,174 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-19T18:30:53,174 ERROR [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,175 WARN [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780-prefix:30db5f576be8,35573,1732041027434.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,175 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C35573%2C1732041027434.meta:.meta(num 1732041028211) roll requested 2024-11-19T18:30:53,175 INFO [regionserver/30db5f576be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C35573%2C1732041027434.meta.1732041053175.meta 2024-11-19T18:30:53,180 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,181 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041053175.meta 2024-11-19T18:30:53,183 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,183 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:53,183 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta 2024-11-19T18:30:53,183 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36963:36963),(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-19T18:30:53,183 DEBUG [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta is not closed yet, will try archiving it next time 2024-11-19T18:30:53,183 WARN [IPC Server handler 3 on default port 33823 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014 2024-11-19T18:30:53,184 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta after 1ms 2024-11-19T18:30:53,199 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/info/fd88307ed1b64198a08deb82dbee961c is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c./info:regioninfo/1732041028725/Put/seqid=0 2024-11-19T18:30:53,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741843_1027 (size=7125) 2024-11-19T18:30:53,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741843_1027 (size=7125) 2024-11-19T18:30:53,205 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/info/fd88307ed1b64198a08deb82dbee961c 2024-11-19T18:30:53,226 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/ns/d1c2bb8b5c2640898a3c51aaa8adbe90 is 43, key is default/ns:d/1732041028276/Put/seqid=0 2024-11-19T18:30:53,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741844_1028 (size=5153) 2024-11-19T18:30:53,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741844_1028 (size=5153) 2024-11-19T18:30:53,232 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/ns/d1c2bb8b5c2640898a3c51aaa8adbe90 2024-11-19T18:30:53,251 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/table/7928350a8f1c4f4db8780f535559d033 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732041028736/Put/seqid=0 2024-11-19T18:30:53,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741845_1029 (size=5438) 2024-11-19T18:30:53,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741845_1029 (size=5438) 2024-11-19T18:30:53,257 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/table/7928350a8f1c4f4db8780f535559d033 2024-11-19T18:30:53,263 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/info/fd88307ed1b64198a08deb82dbee961c as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/info/fd88307ed1b64198a08deb82dbee961c 2024-11-19T18:30:53,268 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/info/fd88307ed1b64198a08deb82dbee961c, entries=10, sequenceid=11, filesize=7.0 K 2024-11-19T18:30:53,269 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/ns/d1c2bb8b5c2640898a3c51aaa8adbe90 as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/ns/d1c2bb8b5c2640898a3c51aaa8adbe90 2024-11-19T18:30:53,274 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/ns/d1c2bb8b5c2640898a3c51aaa8adbe90, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T18:30:53,274 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/.tmp/table/7928350a8f1c4f4db8780f535559d033 as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/table/7928350a8f1c4f4db8780f535559d033 2024-11-19T18:30:53,279 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/table/7928350a8f1c4f4db8780f535559d033, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T18:30:53,280 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 106ms, sequenceid=11, compaction requested=false 2024-11-19T18:30:53,280 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T18:30:53,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:30:53,288 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T18:30:53,288 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:53,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:53,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:53,289 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T18:30:53,289 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:30:53,289 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1430893487, stopped=false 2024-11-19T18:30:53,289 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,44045,1732041027384 2024-11-19T18:30:53,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:53,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:30:53,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:53,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:53,291 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:30:53,291 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:30:53,291 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:53,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:53,291 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,35573,1732041027434' ***** 2024-11-19T18:30:53,291 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:30:53,291 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:53,291 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:30:53,291 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:30:53,292 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(3091): Received CLOSE for d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,35573,1732041027434 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:35573. 
2024-11-19T18:30:53,292 DEBUG [RS:0;30db5f576be8:35573 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:30:53,292 DEBUG [RS:0;30db5f576be8:35573 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:53,292 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d21be12d123699a43e78f95ae943248c, disabling compactions & flushes 2024-11-19T18:30:53,292 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:53,292 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:30:53,292 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. after waiting 0 ms 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:30:53,292 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T18:30:53,292 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:30:53,293 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T18:30:53,293 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1325): Online Regions={d21be12d123699a43e78f95ae943248c=TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T18:30:53,293 DEBUG [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d21be12d123699a43e78f95ae943248c 2024-11-19T18:30:53,293 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:30:53,293 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:30:53,293 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:30:53,293 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:30:53,293 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:30:53,297 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/default/TestLogRolling-testLogRollOnPipelineRestart/d21be12d123699a43e78f95ae943248c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-19T18:30:53,297 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T18:30:53,297 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 2024-11-19T18:30:53,297 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d21be12d123699a43e78f95ae943248c: Waiting for close lock at 1732041053292Running coprocessor pre-close hooks at 1732041053292Disabling compacts and flushes for region at 1732041053292Disabling writes for close at 1732041053292Writing region close event to WAL at 1732041053293 (+1 ms)Running coprocessor post-close hooks at 1732041053297 (+4 ms)Closed at 1732041053297 2024-11-19T18:30:53,297 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732041028365.d21be12d123699a43e78f95ae943248c. 
2024-11-19T18:30:53,298 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:30:53,298 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:30:53,298 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041053293Running coprocessor pre-close hooks at 1732041053293Disabling compacts and flushes for region at 1732041053293Disabling writes for close at 1732041053293Writing region close event to WAL at 1732041053294 (+1 ms)Running coprocessor post-close hooks at 1732041053297 (+3 ms)Closed at 1732041053298 (+1 ms) 2024-11-19T18:30:53,298 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:30:53,493 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,35573,1732041027434; all regions closed. 2024-11-19T18:30:53,494 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,494 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,494 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,494 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,494 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:53,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741842_1025 (size=825) 2024-11-19T18:30:53,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741842_1025 (size=825) 2024-11-19T18:30:53,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:53,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:30:53,682 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T18:30:53,682 INFO [regionserver/30db5f576be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T18:30:53,683 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:54,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:54,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:55,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:55,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:56,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:56,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:57,156 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-19T18:30:57,184 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta after 4001ms 2024-11-19T18:30:57,185 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/WALs/30db5f576be8,35573,1732041027434/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta to hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs/30db5f576be8%2C35573%2C1732041027434.meta.1732041028211.meta 2024-11-19T18:30:57,188 DEBUG [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs 2024-11-19T18:30:57,188 INFO [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C35573%2C1732041027434.meta:.meta(num 1732041053175) 2024-11-19T18:30:57,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,189 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,189 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741840_1023 (size=1162) 2024-11-19T18:30:57,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741840_1023 (size=1162) 2024-11-19T18:30:57,195 DEBUG [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs 2024-11-19T18:30:57,195 INFO [RS:0;30db5f576be8:35573 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C35573%2C1732041027434:(num 1732041053127) 2024-11-19T18:30:57,195 DEBUG [RS:0;30db5f576be8:35573 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:30:57,195 INFO [RS:0;30db5f576be8:35573 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:30:57,195 INFO [RS:0;30db5f576be8:35573 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:30:57,195 INFO [RS:0;30db5f576be8:35573 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T18:30:57,196 INFO [RS:0;30db5f576be8:35573 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:30:57,196 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:30:57,196 INFO [RS:0;30db5f576be8:35573 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35573 2024-11-19T18:30:57,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,35573,1732041027434 2024-11-19T18:30:57,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:30:57,198 INFO [RS:0;30db5f576be8:35573 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:30:57,199 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,35573,1732041027434] 2024-11-19T18:30:57,202 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,35573,1732041027434 already deleted, retry=false 2024-11-19T18:30:57,202 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,35573,1732041027434 expired; onlineServers=0 2024-11-19T18:30:57,202 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,44045,1732041027384' ***** 2024-11-19T18:30:57,202 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:30:57,202 INFO [M:0;30db5f576be8:44045 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:30:57,202 INFO [M:0;30db5f576be8:44045 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:30:57,202 DEBUG [M:0;30db5f576be8:44045 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:30:57,202 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T18:30:57,202 DEBUG [M:0;30db5f576be8:44045 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:30:57,202 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041027600 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041027600,5,FailOnTimeoutGroup] 2024-11-19T18:30:57,202 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041027601 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041027601,5,FailOnTimeoutGroup] 2024-11-19T18:30:57,202 INFO [M:0;30db5f576be8:44045 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:30:57,202 INFO [M:0;30db5f576be8:44045 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:30:57,202 DEBUG [M:0;30db5f576be8:44045 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:30:57,202 INFO [M:0;30db5f576be8:44045 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:30:57,203 INFO [M:0;30db5f576be8:44045 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:30:57,203 INFO [M:0;30db5f576be8:44045 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:30:57,203 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:30:57,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:30:57,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:30:57,204 DEBUG [M:0;30db5f576be8:44045 {}] zookeeper.ZKUtil(347): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:30:57,204 WARN [M:0;30db5f576be8:44045 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:30:57,204 INFO [M:0;30db5f576be8:44045 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/.lastflushedseqids 2024-11-19T18:30:57,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741846_1030 (size=111) 2024-11-19T18:30:57,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741846_1030 (size=111) 2024-11-19T18:30:57,216 INFO [M:0;30db5f576be8:44045 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:30:57,216 INFO [M:0;30db5f576be8:44045 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:30:57,216 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:30:57,216 INFO [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:57,216 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:57,216 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:30:57,216 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:57,217 INFO [M:0;30db5f576be8:44045 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-19T18:30:57,217 ERROR [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData-prefix:30db5f576be8,44045,1732041027384 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:57,217 WARN [FSHLog-0-hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData-prefix:30db5f576be8,44045,1732041027384 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:57,217 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 30db5f576be8%2C44045%2C1732041027384:(num 1732041027523) roll requested 2024-11-19T18:30:57,217 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C44045%2C1732041027384.1732041057217 2024-11-19T18:30:57,222 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,222 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,223 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041057217 2024-11-19T18:30:57,223 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T18:30:57,223 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41671,DS-59a3f465-3acf-4467-8d03-c261fe36cdfa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T18:30:57,223 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 2024-11-19T18:30:57,224 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:36963:36963)] 2024-11-19T18:30:57,224 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 is not closed yet, will try archiving it next time 2024-11-19T18:30:57,224 WARN [IPC Server handler 0 on default port 33823 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-19T18:30:57,224 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 after 1ms 2024-11-19T18:30:57,239 DEBUG [M:0;30db5f576be8:44045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8deb5982b113419ba8f7256b83858a9d is 82, key is hbase:meta,,1/info:regioninfo/1732041028258/Put/seqid=0 2024-11-19T18:30:57,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741848_1033 (size=5672) 2024-11-19T18:30:57,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741848_1033 (size=5672) 2024-11-19T18:30:57,244 INFO [M:0;30db5f576be8:44045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8deb5982b113419ba8f7256b83858a9d 2024-11-19T18:30:57,263 DEBUG [M:0;30db5f576be8:44045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/666e75f91b174bcf9e9d0a319fc3140d is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732041028741/Put/seqid=0 2024-11-19T18:30:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741849_1034 (size=6118) 2024-11-19T18:30:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741849_1034 (size=6118) 2024-11-19T18:30:57,268 INFO [M:0;30db5f576be8:44045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/666e75f91b174bcf9e9d0a319fc3140d 2024-11-19T18:30:57,286 DEBUG [M:0;30db5f576be8:44045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85d59dd1349a4625a995b1eb0ca57c7b is 69, key is 30db5f576be8,35573,1732041027434/rs:state/1732041027670/Put/seqid=0 2024-11-19T18:30:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741850_1035 (size=5156) 2024-11-19T18:30:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741850_1035 (size=5156) 2024-11-19T18:30:57,291 INFO [M:0;30db5f576be8:44045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85d59dd1349a4625a995b1eb0ca57c7b 2024-11-19T18:30:57,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:57,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35573-0x101317e99b90001, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:30:57,299 INFO [RS:0;30db5f576be8:35573 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:30:57,299 INFO [RS:0;30db5f576be8:35573 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,35573,1732041027434; zookeeper connection closed. 
2024-11-19T18:30:57,300 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68a2d8d4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68a2d8d4 2024-11-19T18:30:57,300 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:30:57,309 DEBUG [M:0;30db5f576be8:44045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4ceec399cae4365b80014280c7488d0 is 52, key is load_balancer_on/state:d/1732041028361/Put/seqid=0 2024-11-19T18:30:57,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741851_1036 (size=5056) 2024-11-19T18:30:57,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741851_1036 (size=5056) 2024-11-19T18:30:57,314 INFO [M:0;30db5f576be8:44045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4ceec399cae4365b80014280c7488d0 2024-11-19T18:30:57,320 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8deb5982b113419ba8f7256b83858a9d as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8deb5982b113419ba8f7256b83858a9d 2024-11-19T18:30:57,324 INFO [M:0;30db5f576be8:44045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8deb5982b113419ba8f7256b83858a9d, entries=8, sequenceid=56, filesize=5.5 K 2024-11-19T18:30:57,325 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/666e75f91b174bcf9e9d0a319fc3140d as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/666e75f91b174bcf9e9d0a319fc3140d 2024-11-19T18:30:57,330 INFO [M:0;30db5f576be8:44045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/666e75f91b174bcf9e9d0a319fc3140d, entries=6, sequenceid=56, filesize=6.0 K 2024-11-19T18:30:57,330 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85d59dd1349a4625a995b1eb0ca57c7b as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/85d59dd1349a4625a995b1eb0ca57c7b 
2024-11-19T18:30:57,335 INFO [M:0;30db5f576be8:44045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/85d59dd1349a4625a995b1eb0ca57c7b, entries=1, sequenceid=56, filesize=5.0 K 2024-11-19T18:30:57,336 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4ceec399cae4365b80014280c7488d0 as hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4ceec399cae4365b80014280c7488d0 2024-11-19T18:30:57,340 INFO [M:0;30db5f576be8:44045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4ceec399cae4365b80014280c7488d0, entries=1, sequenceid=56, filesize=4.9 K 2024-11-19T18:30:57,341 INFO [M:0;30db5f576be8:44045 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false 2024-11-19T18:30:57,343 INFO [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:30:57,343 DEBUG [M:0;30db5f576be8:44045 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041057216Disabling compacts and flushes for region at 1732041057216Disabling writes for close at 1732041057216Obtaining lock to block concurrent updates at 1732041057217 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732041057217Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732041057217Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732041057224 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732041057224Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732041057238 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732041057238Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732041057249 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732041057263 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732041057263Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732041057272 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732041057286 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732041057286Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732041057295 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732041057309 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732041057309Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@120cf8d7: reopening flushed file at 1732041057319 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48733d25: reopening flushed file at 1732041057324 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bd4d444: reopening flushed file at 1732041057330 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@341c1fc8: reopening flushed file at 1732041057335 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false at 1732041057341 (+6 ms)Writing region close event to WAL at 1732041057343 (+2 ms)Closed at 1732041057343 2024-11-19T18:30:57,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:30:57,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34217 is added to blk_1073741847_1031 (size=757) 2024-11-19T18:30:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741847_1031 (size=757) 2024-11-19T18:30:57,367 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T18:30:57,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:57,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:58,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:58,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:58,823 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:30:58,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:58,845 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:30:59,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:30:59,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:00,156 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-19T18:31:00,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:00,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:01,225 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 after 4002ms 2024-11-19T18:31:01,225 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/WALs/30db5f576be8,44045,1732041027384/30db5f576be8%2C44045%2C1732041027384.1732041027523 to hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/oldWALs/30db5f576be8%2C44045%2C1732041027384.1732041027523 2024-11-19T18:31:01,228 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/MasterData/oldWALs/30db5f576be8%2C44045%2C1732041027384.1732041027523 to hdfs://localhost:33823/user/jenkins/test-data/50601261-0e5d-163b-ae4d-26aede44a780/oldWALs/30db5f576be8%2C44045%2C1732041027384.1732041027523$masterlocalwal$ 2024-11-19T18:31:01,228 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T18:31:01,228 INFO [M:0;30db5f576be8:44045 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T18:31:01,228 INFO [M:0;30db5f576be8:44045 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44045 2024-11-19T18:31:01,228 INFO [M:0;30db5f576be8:44045 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:31:01,330 INFO [M:0;30db5f576be8:44045 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:31:01,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:01,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44045-0x101317e99b90000, quorum=127.0.0.1:64240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:01,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3affdf32{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:01,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ebbad67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:01,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:01,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21404da7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:01,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4307cd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:01,334 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:31:01,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:31:01,334 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:31:01,335 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 202824d4-c4bf-427f-8c69-c05a8c2b3668) service to localhost/127.0.0.1:33823 2024-11-19T18:31:01,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data3/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:01,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data4/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:01,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:31:01,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4165df3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:01,338 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32c717fb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:01,338 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:01,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dcfabbb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:01,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bc55163{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:01,339 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:31:01,339 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:31:01,339 WARN [BP-1402508832-172.17.0.2-1732041026642 heartbeating to localhost/127.0.0.1:33823 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1402508832-172.17.0.2-1732041026642 (Datanode Uuid 8f2ad7c0-c851-4de4-8e71-e9e1ad8d807f) service to localhost/127.0.0.1:33823 2024-11-19T18:31:01,339 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:31:01,340 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data1/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:01,340 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/cluster_e8df85f9-7bba-c300-bd77-abff4809ddc7/data/data2/current/BP-1402508832-172.17.0.2-1732041026642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:01,340 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:31:01,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45a54d9c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:31:01,346 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@421a8f73{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:01,346 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:01,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24710539{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:01,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e526681{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:01,353 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:31:01,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:31:01,378 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33823 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33823 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33823 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33823 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33823 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33823 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33823 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33823 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=123 (was 213), ProcessCount=11 (was 11), AvailableMemoryMB=6939 (was 7150) 2024-11-19T18:31:01,385 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=123, ProcessCount=11, AvailableMemoryMB=6939 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.log.dir so I do NOT create it in target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/04aeff59-9971-c928-c4bd-8209340f0ee2/hadoop.tmp.dir so I do NOT create it in target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd, deleteOnExit=true 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/test.cache.data in system properties and HBase conf 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir in system properties and HBase conf 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:31:01,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:31:01,386 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:31:01,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:31:01,400 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:31:01,475 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:01,479 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:01,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:01,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:01,480 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:31:01,481 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:01,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1159c3f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:01,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56673fe9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:01,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@520bb90c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/java.io.tmpdir/jetty-localhost-39747-hadoop-hdfs-3_4_1-tests_jar-_-any-15710344271007367716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:31:01,597 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79c156a5{HTTP/1.1, (http/1.1)}{localhost:39747} 2024-11-19T18:31:01,597 INFO [Time-limited test {}] server.Server(415): Started @184979ms 2024-11-19T18:31:01,610 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:31:01,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:01,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:01,667 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:01,670 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:01,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:01,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:01,671 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:31:01,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68744dfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:01,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521506bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:01,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f08894b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/java.io.tmpdir/jetty-localhost-46309-hadoop-hdfs-3_4_1-tests_jar-_-any-12596271566534997992/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:01,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ac0122b{HTTP/1.1, (http/1.1)}{localhost:46309} 2024-11-19T18:31:01,790 INFO [Time-limited test {}] server.Server(415): Started @185172ms 2024-11-19T18:31:01,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:31:01,827 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:01,830 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:01,831 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:01,831 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:01,831 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:31:01,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@374dfdaf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:01,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7101d128{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:01,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:31:01,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T18:31:01,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T18:31:01,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T18:31:01,907 WARN [Thread-1631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data1/current/BP-764291916-172.17.0.2-1732041061417/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:01,907 WARN [Thread-1632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data2/current/BP-764291916-172.17.0.2-1732041061417/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:01,930 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:31:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf35f57c7d2dc9c55 with lease ID 0xadb28888cd9311a1: Processing first storage report for DS-e719a9b2-ddb6-47cb-9ef5-73405356e11e from datanode DatanodeRegistration(127.0.0.1:41887, datanodeUuid=ed0b9b24-7ca1-4733-ac59-6960c903d193, infoPort=46843, infoSecurePort=0, ipcPort=45065, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417) 2024-11-19T18:31:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf35f57c7d2dc9c55 with lease ID 0xadb28888cd9311a1: from storage DS-e719a9b2-ddb6-47cb-9ef5-73405356e11e node DatanodeRegistration(127.0.0.1:41887, datanodeUuid=ed0b9b24-7ca1-4733-ac59-6960c903d193, infoPort=46843, infoSecurePort=0, ipcPort=45065, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:31:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf35f57c7d2dc9c55 with lease ID 0xadb28888cd9311a1: Processing first storage report for DS-a4f1e10f-697e-428c-a634-84dd26505d4e from datanode DatanodeRegistration(127.0.0.1:41887, datanodeUuid=ed0b9b24-7ca1-4733-ac59-6960c903d193, infoPort=46843, infoSecurePort=0, ipcPort=45065, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417) 2024-11-19T18:31:01,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf35f57c7d2dc9c55 with lease ID 0xadb28888cd9311a1: from storage DS-a4f1e10f-697e-428c-a634-84dd26505d4e node DatanodeRegistration(127.0.0.1:41887, datanodeUuid=ed0b9b24-7ca1-4733-ac59-6960c903d193, infoPort=46843, infoSecurePort=0, ipcPort=45065, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:31:01,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8afa355{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/java.io.tmpdir/jetty-localhost-34201-hadoop-hdfs-3_4_1-tests_jar-_-any-9295257273353603277/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:01,954 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39212263{HTTP/1.1, (http/1.1)}{localhost:34201} 2024-11-19T18:31:01,954 INFO [Time-limited test {}] server.Server(415): Started @185337ms 2024-11-19T18:31:01,956 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T18:31:02,057 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data3/current/BP-764291916-172.17.0.2-1732041061417/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:02,057 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data4/current/BP-764291916-172.17.0.2-1732041061417/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:02,074 WARN [Thread-1646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:31:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d529cbe479c640e with lease ID 0xadb28888cd9311a2: Processing first storage report for DS-19705324-148e-43ca-a06c-ab28517de7a8 from datanode DatanodeRegistration(127.0.0.1:34981, datanodeUuid=46adfa70-b3b2-45a1-918f-6cc21495ecef, infoPort=42521, infoSecurePort=0, ipcPort=40745, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417) 2024-11-19T18:31:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d529cbe479c640e with lease ID 0xadb28888cd9311a2: from storage DS-19705324-148e-43ca-a06c-ab28517de7a8 node DatanodeRegistration(127.0.0.1:34981, datanodeUuid=46adfa70-b3b2-45a1-918f-6cc21495ecef, infoPort=42521, infoSecurePort=0, ipcPort=40745, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:31:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d529cbe479c640e with lease ID 0xadb28888cd9311a2: Processing first storage report for DS-559608ad-ead2-4fb1-8943-c804f341374c from datanode DatanodeRegistration(127.0.0.1:34981, datanodeUuid=46adfa70-b3b2-45a1-918f-6cc21495ecef, infoPort=42521, infoSecurePort=0, ipcPort=40745, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417) 2024-11-19T18:31:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d529cbe479c640e with lease ID 0xadb28888cd9311a2: from storage DS-559608ad-ead2-4fb1-8943-c804f341374c node DatanodeRegistration(127.0.0.1:34981, datanodeUuid=46adfa70-b3b2-45a1-918f-6cc21495ecef, infoPort=42521, infoSecurePort=0, ipcPort=40745, storageInfo=lv=-57;cid=testClusterID;nsid=839095175;c=1732041061417), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:31:02,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8 2024-11-19T18:31:02,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/zookeeper_0, clientPort=51554, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:31:02,082 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51554 2024-11-19T18:31:02,082 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:31:02,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:31:02,092 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f with version=8 2024-11-19T18:31:02,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:31:02,094 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:31:02,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:31:02,095 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39295 2024-11-19T18:31:02,097 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39295 connecting to ZooKeeper ensemble=127.0.0.1:51554 2024-11-19T18:31:02,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392950x0, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:31:02,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39295-0x101317f21500000 connected 2024-11-19T18:31:02,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:02,125 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f, hbase.cluster.distributed=false 2024-11-19T18:31:02,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:31:02,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39295 2024-11-19T18:31:02,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39295 2024-11-19T18:31:02,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39295 2024-11-19T18:31:02,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39295 2024-11-19T18:31:02,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39295 2024-11-19T18:31:02,143 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:31:02,143 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:31:02,144 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:31:02,144 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38405 2024-11-19T18:31:02,146 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38405 connecting to ZooKeeper ensemble=127.0.0.1:51554 2024-11-19T18:31:02,146 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,148 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384050x0, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:31:02,152 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384050x0, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:02,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38405-0x101317f21500001 connected 2024-11-19T18:31:02,152 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:31:02,153 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:31:02,154 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:31:02,154 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:31:02,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38405 2024-11-19T18:31:02,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38405 2024-11-19T18:31:02,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38405 2024-11-19T18:31:02,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38405 2024-11-19T18:31:02,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38405 2024-11-19T18:31:02,167 
DEBUG [M:0;30db5f576be8:39295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:39295 2024-11-19T18:31:02,168 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:02,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:02,170 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:31:02,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,172 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:31:02,172 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,39295,1732041062094 from backup master directory 2024-11-19T18:31:02,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:02,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:02,175 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T18:31:02,175 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,179 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/hbase.id] with ID: 3b76a9c8-f3a5-43c3-afd0-549e4a020d57 2024-11-19T18:31:02,180 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/.tmp/hbase.id 2024-11-19T18:31:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:31:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:31:02,185 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/.tmp/hbase.id]:[hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/hbase.id] 2024-11-19T18:31:02,196 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:02,196 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:31:02,198 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-19T18:31:02,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:31:02,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:31:02,210 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:31:02,211 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:31:02,212 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:02,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:31:02,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:31:02,221 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store 2024-11-19T18:31:02,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:31:02,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:31:02,230 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:02,230 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:31:02,230 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041062230Disabling compacts and flushes for region at 1732041062230Disabling writes for close at 1732041062230Writing region close event to WAL at 1732041062230Closed at 1732041062230 2024-11-19T18:31:02,231 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/.initializing 2024-11-19T18:31:02,231 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/WALs/30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,233 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C39295%2C1732041062094, suffix=, logDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/WALs/30db5f576be8,39295,1732041062094, archiveDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/oldWALs, maxLogs=10 2024-11-19T18:31:02,234 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C39295%2C1732041062094.1732041062234 2024-11-19T18:31:02,238 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/WALs/30db5f576be8,39295,1732041062094/30db5f576be8%2C39295%2C1732041062094.1732041062234 2024-11-19T18:31:02,239 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46843:46843),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-19T18:31:02,239 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:02,240 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:02,240 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,240 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,241 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:31:02,242 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:31:02,244 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:02,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:31:02,245 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:02,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:31:02,247 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:02,247 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,248 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,248 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,249 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,249 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,250 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:31:02,251 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:02,253 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:02,253 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849901, jitterRate=0.08070600032806396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:31:02,254 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732041062240Initializing all the Stores at 1732041062241 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062241Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041062241Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041062241Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041062241Cleaning up temporary data from old regions at 1732041062249 (+8 ms)Region opened successfully at 1732041062254 (+5 ms) 2024-11-19T18:31:02,254 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:31:02,257 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4391e7de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:31:02,257 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:31:02,258 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:31:02,260 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:31:02,261 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:31:02,262 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:31:02,262 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:31:02,263 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:31:02,264 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:31:02,264 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:31:02,265 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:31:02,267 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:31:02,268 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:31:02,269 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:31:02,270 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:31:02,271 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:31:02,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:02,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:02,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,273 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,39295,1732041062094, sessionid=0x101317f21500000, setting cluster-up flag (Was=false) 2024-11-19T18:31:02,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,284 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:31:02,284 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,293 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:31:02,293 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,39295,1732041062094 2024-11-19T18:31:02,294 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:31:02,296 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:02,296 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:31:02,297 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:31:02,297 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,39295,1732041062094 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:31:02,298 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:31:02,300 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:02,300 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:31:02,301 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,301 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041092303 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:31:02,303 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,304 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:31:02,304 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:31:02,304 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:31:02,304 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:31:02,304 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:31:02,306 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041062304,5,FailOnTimeoutGroup] 2024-11-19T18:31:02,306 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041062306,5,FailOnTimeoutGroup] 2024-11-19T18:31:02,306 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,306 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:31:02,306 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,306 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T18:31:02,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:31:02,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:31:02,309 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:31:02,309 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f 2024-11-19T18:31:02,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:31:02,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:31:02,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:02,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:31:02,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:31:02,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:31:02,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:31:02,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:31:02,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:31:02,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:31:02,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:31:02,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:31:02,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740 2024-11-19T18:31:02,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740 2024-11-19T18:31:02,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:31:02,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:31:02,327 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
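Aside: the PEWorker-1 entries above show the hbase:meta table descriptor being materialized with four column families (info, ns, rep_barrier, table), each opened with ROW_INDEX_V1 data-block encoding, ROWCOL bloom filters, IN_MEMORY caching and the default store file tracker, plus a per-store CompactionConfiguration (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0; those values are normally driven by the hbase.hstore.compaction.min/.max/.ratio settings, key names quoted from memory rather than from this log). The meta descriptor itself is assembled internally by HBase, so the following is only a hedged sketch of how an equivalent column family would be declared through the public client API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed above: VERSIONS=3,
    // ROW_INDEX_V1 encoding, ROWCOL bloom, IN_MEMORY=true, BLOCKSIZE=8 KB.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // 'example' is a placeholder table name; hbase:meta is never created this way.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
  }
}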
2024-11-19T18:31:02,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:31:02,330 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:02,330 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859270, jitterRate=0.09261853992938995}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732041062316Initializing all the Stores at 1732041062317 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062317Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062317Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041062317Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062317Cleaning up temporary data from old regions at 1732041062326 (+9 ms)Region opened successfully at 1732041062331 (+5 ms) 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:31:02,331 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:31:02,331 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:31:02,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041062331Disabling compacts and flushes for region at 1732041062331Disabling writes for close at 1732041062331Writing region close 
event to WAL at 1732041062331Closed at 1732041062331 2024-11-19T18:31:02,333 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:02,333 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:31:02,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:31:02,334 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:31:02,336 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:31:02,357 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(746): ClusterId : 3b76a9c8-f3a5-43c3-afd0-549e4a020d57 2024-11-19T18:31:02,357 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:31:02,360 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:31:02,360 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:31:02,361 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:31:02,362 DEBUG [RS:0;30db5f576be8:38405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b3a37a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:31:02,374 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:38405 2024-11-19T18:31:02,374 INFO [RS:0;30db5f576be8:38405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:31:02,374 INFO [RS:0;30db5f576be8:38405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:31:02,374 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(832): About to register with Master. 
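Aside: the ipc.AbstractRpcClient line above prints the region server's client-side RPC settings (KeyValueCodec, no compressor, tcpKeepAlive/tcpNoDelay, 10 s/20 s/60 s connect/read/write timeouts). A minimal sketch of how such knobs are typically set on a client Configuration follows; the property names are recalled from hbase-default.xml and should be treated as assumptions, not values read off this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcClientSettingsSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Codec shown in the log entry above (assumed key name).
    conf.set("hbase.client.rpc.codec", "org.apache.hadoop.hbase.codec.KeyValueCodec");
    // Socket behaviour matching tcpKeepAlive=true / tcpNoDelay=true (assumed key names).
    conf.setBoolean("hbase.ipc.client.tcpkeepalive", true);
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
    return conf;
  }
}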
2024-11-19T18:31:02,374 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,39295,1732041062094 with port=38405, startcode=1732041062143 2024-11-19T18:31:02,375 DEBUG [RS:0;30db5f576be8:38405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:31:02,376 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54989, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:31:02,377 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39295 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,377 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39295 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,379 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f 2024-11-19T18:31:02,379 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34875 2024-11-19T18:31:02,379 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:31:02,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:31:02,382 DEBUG [RS:0;30db5f576be8:38405 {}] zookeeper.ZKUtil(111): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,382 WARN [RS:0;30db5f576be8:38405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:31:02,382 INFO [RS:0;30db5f576be8:38405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:02,382 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,382 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,38405,1732041062143] 2024-11-19T18:31:02,385 INFO [RS:0;30db5f576be8:38405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:31:02,387 INFO [RS:0;30db5f576be8:38405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:31:02,387 INFO [RS:0;30db5f576be8:38405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:31:02,387 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
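Aside: the MemStoreFlusher (globalMemStoreLimit=880 M, lower mark 836 M) and PressureAwareCompactionThroughputController (100 MB/s upper, 50 MB/s lower bound) entries above are driven by region-server tuning settings, as is the FSHLogProvider chosen by the WALFactory. A hedged sketch of the corresponding configuration follows; the property names are recalled from the HBase defaults and are assumptions to verify against hbase-default.xml, not values taken from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the RS heap shared by all memstores; the 880 M / 836 M pair
    // above is this limit and its lower mark for this JVM's heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Bounds used by the pressure-aware compaction throughput controller.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // "filesystem" selects the FSHLogProvider reported by the WALFactory above.
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}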
2024-11-19T18:31:02,387 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:31:02,388 INFO [RS:0;30db5f576be8:38405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:31:02,388 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,388 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,388 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,388 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,388 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,388 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:31:02,389 DEBUG [RS:0;30db5f576be8:38405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:31:02,389 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T18:31:02,389 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,389 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,389 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,389 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,390 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,38405,1732041062143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:31:02,405 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:31:02,405 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,38405,1732041062143-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,405 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,405 INFO [RS:0;30db5f576be8:38405 {}] regionserver.Replication(171): 30db5f576be8,38405,1732041062143 started 2024-11-19T18:31:02,420 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,420 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,38405,1732041062143, RpcServer on 30db5f576be8/172.17.0.2:38405, sessionid=0x101317f21500001 2024-11-19T18:31:02,420 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:31:02,420 DEBUG [RS:0;30db5f576be8:38405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,420 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,38405,1732041062143' 2024-11-19T18:31:02,420 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,38405,1732041062143' 2024-11-19T18:31:02,421 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:31:02,422 DEBUG 
[RS:0;30db5f576be8:38405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:31:02,422 DEBUG [RS:0;30db5f576be8:38405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:31:02,422 INFO [RS:0;30db5f576be8:38405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:31:02,422 INFO [RS:0;30db5f576be8:38405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:31:02,486 WARN [30db5f576be8:39295 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:31:02,524 INFO [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C38405%2C1732041062143, suffix=, logDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143, archiveDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs, maxLogs=32 2024-11-19T18:31:02,525 INFO [RS:0;30db5f576be8:38405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C38405%2C1732041062143.1732041062524 2024-11-19T18:31:02,530 INFO [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041062524 2024-11-19T18:31:02,531 DEBUG [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46843:46843),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-19T18:31:02,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:02,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:02,736 DEBUG [30db5f576be8:39295 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:31:02,737 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,738 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,38405,1732041062143, state=OPENING 2024-11-19T18:31:02,740 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:31:02,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:02,742 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:31:02,742 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:02,742 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:02,742 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,38405,1732041062143}] 2024-11-19T18:31:02,895 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:31:02,897 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56437, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:31:02,901 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:31:02,901 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:02,903 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C38405%2C1732041062143.meta, suffix=.meta, logDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143, archiveDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs, maxLogs=32 2024-11-19T18:31:02,904 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C38405%2C1732041062143.meta.1732041062904.meta 2024-11-19T18:31:02,908 INFO 
[RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.meta.1732041062904.meta 2024-11-19T18:31:02,910 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46843:46843),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-19T18:31:02,910 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:31:02,911 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:31:02,911 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:31:02,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:31:02,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:31:02,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:31:02,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:31:02,915 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:31:02,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:31:02,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,916 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:31:02,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:31:02,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:02,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:02,917 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:31:02,918 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740 2024-11-19T18:31:02,918 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740 2024-11-19T18:31:02,919 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:31:02,919 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:31:02,920 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
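Aside: the coprocessor entries above show the MultiRowMutationEndpoint declared in the meta descriptor (the '|...|536870911|' attribute; 536870911 is Integer.MAX_VALUE / 4, the system coprocessor priority) being loaded while the region opens. For a user table the same registration is made through the client API; a hedged illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static TableDescriptor withMultiRowMutation(TableName table) throws IOException {
    // Registers the endpoint on the descriptor; a real table would also need
    // at least one column family before Admin.createTable would accept it.
    return TableDescriptorBuilder.newBuilder(table)
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}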
2024-11-19T18:31:02,921 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:31:02,922 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727020, jitterRate=-0.07554648816585541}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:31:02,922 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:31:02,922 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732041062911Writing region info on filesystem at 1732041062911Initializing all the Stores at 1732041062912 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062912Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062912Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041062912Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041062912Cleaning up temporary data from old regions at 1732041062919 (+7 ms)Running coprocessor post-open hooks at 1732041062922 (+3 ms)Region opened successfully at 1732041062922 2024-11-19T18:31:02,924 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732041062895 2024-11-19T18:31:02,926 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:31:02,926 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:31:02,927 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,928 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,38405,1732041062143, state=OPEN 2024-11-19T18:31:02,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:31:02,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:31:02,938 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,38405,1732041062143 2024-11-19T18:31:02,938 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:02,938 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:02,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:31:02,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,38405,1732041062143 in 196 msec 2024-11-19T18:31:02,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:31:02,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-19T18:31:02,946 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:02,946 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:31:02,948 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:31:02,948 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,38405,1732041062143, seqNum=-1] 2024-11-19T18:31:02,948 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:31:02,950 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55461, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:31:02,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 659 msec 2024-11-19T18:31:02,956 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732041062956, completionTime=-1 2024-11-19T18:31:02,956 INFO 
[master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:31:02,956 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:31:02,958 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:31:02,958 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041122958 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041182959 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:39295, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,959 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:02,961 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.787sec 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
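Aside: with pid=3/2/1 finished, the hbase:meta location is published in ZooKeeper and available through the connection registry, which is what the 'Start fetching meta region location from registry' entries reflect. A small, self-contained client sketch of the equivalent lookup (standard client API only, nothing test-specific assumed):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    // Resolve where hbase:meta is currently hosted, i.e. the same location the
    // master just wrote to the /hbase/meta-region-server znode above.
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}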
2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:31:02,963 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:31:02,966 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:31:02,966 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:31:02,966 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,39295,1732041062094-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:03,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bff2115, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:03,058 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,39295,-1 for getting cluster id 2024-11-19T18:31:03,058 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:31:03,060 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3b76a9c8-f3a5-43c3-afd0-549e4a020d57' 2024-11-19T18:31:03,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:31:03,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3b76a9c8-f3a5-43c3-afd0-549e4a020d57" 2024-11-19T18:31:03,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@342a6471, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:03,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,39295,-1] 2024-11-19T18:31:03,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:31:03,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:03,062 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:31:03,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c6abbb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:03,063 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:31:03,064 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,38405,1732041062143, seqNum=-1] 2024-11-19T18:31:03,065 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:31:03,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:31:03,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,39295,1732041062094 2024-11-19T18:31:03,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:03,070 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:31:03,070 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T18:31:03,071 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 30db5f576be8,39295,1732041062094 2024-11-19T18:31:03,071 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79cd7e70 2024-11-19T18:31:03,071 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T18:31:03,072 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T18:31:03,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T18:31:03,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
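Aside: 'Minicluster is up' and 'set balanceSwitch=false' mark the point where the harness hands control to the test body. A hedged sketch of that harness pattern follows; the HBaseTestingUtil class name is taken from the log, but its constructor and startMiniCluster/getAdmin/shutdownMiniCluster methods are assumed from the older HBaseTestingUtility API:

import org.apache.hadoop.hbase.HBaseTestingUtil;   // HBaseTestingUtility on 2.x branches
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                 // "Minicluster is up" above
    try (Admin admin = util.getAdmin()) {
      // Matches "set balanceSwitch=false": the test disables the balancer so it
      // controls region placement itself.
      admin.balancerSwitch(false, true);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}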
2024-11-19T18:31:03,073 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:31:03,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:03,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T18:31:03,075 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:03,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-19T18:31:03,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:31:03,076 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T18:31:03,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741835_1011 (size=405) 2024-11-19T18:31:03,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741835_1011 (size=405) 2024-11-19T18:31:03,084 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6f21dcf66b69b19f8fbfbfc2ac1cac63, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f 2024-11-19T18:31:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741836_1012 (size=88) 2024-11-19T18:31:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34981 is added to blk_1073741836_1012 (size=88) 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6f21dcf66b69b19f8fbfbfc2ac1cac63, disabling compactions & flushes 2024-11-19T18:31:03,091 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. after waiting 0 ms 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,091 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: Waiting for close lock at 1732041063091Disabling compacts and flushes for region at 1732041063091Disabling writes for close at 1732041063091Writing region close event to WAL at 1732041063091Closed at 1732041063091 2024-11-19T18:31:03,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T18:31:03,092 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732041063092"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732041063092"}]},"ts":"1732041063092"} 2024-11-19T18:31:03,094 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
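For context on the CreateTableProcedure (pid=4) whose ADD_TO_META step just completed above, the following is a hypothetical client-side sketch of the create request that the master logged ("Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', VERSIONS => '1', ...}"). Only the table name, the 'info' family, and VERSIONS => '1' are taken from the log; the connection handling and builder calls are assumed.

```java
// Hypothetical sketch of the client request behind CreateTableProcedure pid=4.
// Table name and column-family attributes come from the logged descriptor;
// everything else is assumed scaffolding.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableName name =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      admin.createTable(
          TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
                      .build())
              .build());
      // The master turns this into the CreateTableProcedure seen in the log,
      // writing the region info and table state rows into hbase:meta.
    }
  }
}
```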
2024-11-19T18:31:03,095 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T18:31:03,095 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041063095"}]},"ts":"1732041063095"} 2024-11-19T18:31:03,097 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-19T18:31:03,097 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6f21dcf66b69b19f8fbfbfc2ac1cac63, ASSIGN}] 2024-11-19T18:31:03,099 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6f21dcf66b69b19f8fbfbfc2ac1cac63, ASSIGN 2024-11-19T18:31:03,100 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6f21dcf66b69b19f8fbfbfc2ac1cac63, ASSIGN; state=OFFLINE, location=30db5f576be8,38405,1732041062143; forceNewPlan=false, retain=false 2024-11-19T18:31:03,250 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6f21dcf66b69b19f8fbfbfc2ac1cac63, regionState=OPENING, regionLocation=30db5f576be8,38405,1732041062143 2024-11-19T18:31:03,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6f21dcf66b69b19f8fbfbfc2ac1cac63, ASSIGN because future has completed 2024-11-19T18:31:03,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f21dcf66b69b19f8fbfbfc2ac1cac63, server=30db5f576be8,38405,1732041062143}] 2024-11-19T18:31:03,410 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
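At this point the ASSIGN subprocedure (pid=5) has dispatched OpenRegionProcedure pid=6 and the region server begins opening region 6f21dcf66b69b19f8fbfbfc2ac1cac63. As a hedged illustration only, a caller could confirm the assignment like the sketch below; the table name is from the log, while the verification approach, connection setup, and printed fields are assumptions, not part of the test shown here.

```java
// Hypothetical sketch: confirming that the single region of the test table is
// open and locating its region server, mirroring the OPEN/assignment entries above.
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAssignmentSketch {
  public static void main(String[] args) throws Exception {
    TableName name =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(name)) {
      // True once the CreateTableProcedure and its ASSIGN subprocedure finish.
      System.out.println("available: " + admin.isTableAvailable(name));

      // Look up the region holding the empty start key; it should report the
      // server that logged the "Open TestLogRolling-..." message above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("region " + loc.getRegion().getEncodedName()
          + " on " + loc.getServerName());
    }
  }
}
```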
2024-11-19T18:31:03,410 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6f21dcf66b69b19f8fbfbfc2ac1cac63, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:03,411 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,411 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:03,411 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,411 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,412 INFO [StoreOpener-6f21dcf66b69b19f8fbfbfc2ac1cac63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,414 INFO [StoreOpener-6f21dcf66b69b19f8fbfbfc2ac1cac63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f21dcf66b69b19f8fbfbfc2ac1cac63 columnFamilyName info 2024-11-19T18:31:03,414 DEBUG [StoreOpener-6f21dcf66b69b19f8fbfbfc2ac1cac63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:03,414 INFO [StoreOpener-6f21dcf66b69b19f8fbfbfc2ac1cac63-1 {}] regionserver.HStore(327): Store=6f21dcf66b69b19f8fbfbfc2ac1cac63/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:03,414 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,415 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,416 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,416 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,416 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,418 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,420 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:03,421 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6f21dcf66b69b19f8fbfbfc2ac1cac63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764271, jitterRate=-0.028180420398712158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:31:03,421 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:03,422 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: Running coprocessor pre-open hook at 1732041063411Writing region info on filesystem at 1732041063411Initializing all the Stores at 1732041063412 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041063412Cleaning up temporary data from old regions at 1732041063416 (+4 ms)Running coprocessor post-open hooks at 1732041063421 (+5 ms)Region opened successfully at 1732041063422 (+1 ms) 2024-11-19T18:31:03,423 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63., pid=6, masterSystemTime=1732041063406 2024-11-19T18:31:03,425 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,425 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:03,426 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6f21dcf66b69b19f8fbfbfc2ac1cac63, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,38405,1732041062143 2024-11-19T18:31:03,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f21dcf66b69b19f8fbfbfc2ac1cac63, server=30db5f576be8,38405,1732041062143 because future has completed 2024-11-19T18:31:03,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T18:31:03,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6f21dcf66b69b19f8fbfbfc2ac1cac63, server=30db5f576be8,38405,1732041062143 in 177 msec 2024-11-19T18:31:03,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T18:31:03,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6f21dcf66b69b19f8fbfbfc2ac1cac63, ASSIGN in 335 msec 2024-11-19T18:31:03,435 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T18:31:03,435 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041063435"}]},"ts":"1732041063435"} 2024-11-19T18:31:03,437 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-19T18:31:03,438 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T18:31:03,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 365 msec 2024-11-19T18:31:03,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:03,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:04,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:04,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:05,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:05,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:06,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:06,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:07,363 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:31:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:07,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:07,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:08,386 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T18:31:08,386 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-19T18:31:08,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:08,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:09,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:09,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T18:31:10,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T18:31:10,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:11,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:11,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:11,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-19T18:31:11,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-19T18:31:11,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-19T18:31:11,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-19T18:31:11,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-19T18:31:11,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-19T18:31:11,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T18:31:11,862 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-19T18:31:12,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:12,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
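Editor's note: every WARN above (and the ones that repeat roughly once per second below) has the same shape: while waiting for lease recovery on the two old WAL files, RecoverLeaseFSUtils reflectively probes DistributedFileSystem.isFileClosed(Path), and because the DFSClient behind that FileSystem has already been shut down, each probe fails with IOException("Filesystem closed") wrapped in an InvocationTargetException. The Java snippet below is only a minimal sketch of how such a reflective probe surfaces that wrapped cause; it is not the HBase source, and the class name IsFileClosedProbe is made up for illustration.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical helper, for illustration only.
    public final class IsFileClosedProbe {
      static boolean probe(FileSystem fs, Path wal) {
        try {
          // isFileClosed(Path) only exists on some FileSystem implementations
          // (DistributedFileSystem has it), hence the reflective lookup.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // implementation does not expose the method
        } catch (InvocationTargetException e) {
          // e.getCause() carries the real failure, e.g.
          // java.io.IOException: Filesystem closed, matching the WARN entries above.
          return false;
        }
      }
    }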
2024-11-19T18:31:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-19T18:31:13,162 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T18:31:13,162 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-19T18:31:13,165 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T18:31:13,165 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.
2024-11-19T18:31:13,168 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63., hostname=30db5f576be8,38405,1732041062143, seqNum=2]
2024-11-19T18:31:13,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T18:31:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T18:31:13,182 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T18:31:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-19T18:31:13,184 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T18:31:13,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T18:31:13,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-19T18:31:13,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.
2024-11-19T18:31:13,346 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T18:31:13,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/60fa1218171948b3a188d6bb0e6b079d is 1080, key is row0001/info:/1732041073169/Put/seqid=0
2024-11-19T18:31:13,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741837_1013 (size=6033)
2024-11-19T18:31:13,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741837_1013 (size=6033)
2024-11-19T18:31:13,369 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/60fa1218171948b3a188d6bb0e6b079d
2024-11-19T18:31:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/60fa1218171948b3a188d6bb0e6b079d as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d
2024-11-19T18:31:13,380 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d, entries=1, sequenceid=5, filesize=5.9 K
2024-11-19T18:31:13,381 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 35ms, sequenceid=5, compaction requested=false
2024-11-19T18:31:13,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63:
2024-11-19T18:31:13,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.
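Editor's note: the entries above show the full flush path for this table: the master receives the flush request, FlushTableProcedure pid=7 fans out FlushRegionProcedure pid=8, and the region server writes the 1.05 KB memstore to a temporary HFile under .tmp/info before committing it into info/. The Java snippet below is a hedged sketch of how a client could request the same kind of flush through the public Admin API; the connection settings are placeholders, and the test itself drives writes and flushes through its own HBaseTestingUtil helpers rather than this code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; placeholder configuration.
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Write one cell into the 'info' family (the log shows key row0001/info:),
          // then force it out of the memstore and into an HFile.
          table.put(new Put(Bytes.toBytes("row0001"))
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value")));
          admin.flush(tn); // triggers a FlushTableProcedure on the master, as logged above
        }
      }
    }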
2024-11-19T18:31:13,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-19T18:31:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-19T18:31:13,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-19T18:31:13,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec
2024-11-19T18:31:13,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec
2024-11-19T18:31:13,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:13,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:14,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:14,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:15,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:15,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:16,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:16,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:17,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:17,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:18,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:18,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:19,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:19,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:20,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:20,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:21,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:21,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:31:22,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:31:22,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-19T18:31:23,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T18:31:23,252 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T18:31:23,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:23,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:23,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-19T18:31:23,258 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T18:31:23,259 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T18:31:23,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T18:31:23,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-19T18:31:23,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:23,413 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T18:31:23,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/5da2bdfbb84143398f9ea74563e6e97a is 1080, key is row0002/info:/1732041083254/Put/seqid=0 2024-11-19T18:31:23,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741838_1014 (size=6033) 2024-11-19T18:31:23,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741838_1014 (size=6033) 2024-11-19T18:31:23,424 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/5da2bdfbb84143398f9ea74563e6e97a 2024-11-19T18:31:23,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/5da2bdfbb84143398f9ea74563e6e97a as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a 2024-11-19T18:31:23,438 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a, entries=1, sequenceid=9, filesize=5.9 K 2024-11-19T18:31:23,441 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 28ms, sequenceid=9, compaction requested=false 2024-11-19T18:31:23,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: 2024-11-19T18:31:23,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:23,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-19T18:31:23,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-19T18:31:23,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-19T18:31:23,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-19T18:31:23,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-19T18:31:23,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:23,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:24,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:24,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:25,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:25,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:26,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:26,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:27,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:27,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:27,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:31:27,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta after 68032ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T18:31:28,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:28,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:29,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:29,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:30,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:30,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:31,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:31,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:32,078 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T18:31:32,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:32,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:33,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-19T18:31:33,343 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T18:31:33,346 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C38405%2C1732041062143.1732041093345 2024-11-19T18:31:33,351 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:33,352 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:33,352 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:33,352 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:33,352 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:33,352 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041062524 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041093345 2024-11-19T18:31:33,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42521:42521),(127.0.0.1/127.0.0.1:46843:46843)] 2024-11-19T18:31:33,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041062524 is not closed yet, will try archiving it next time 2024-11-19T18:31:33,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741833_1009 (size=5546) 2024-11-19T18:31:33,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:33,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741833_1009 (size=5546) 2024-11-19T18:31:33,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:33,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T18:31:33,357 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T18:31:33,357 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T18:31:33,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T18:31:33,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-19T18:31:33,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:33,511 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T18:31:33,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad49fd75eb934f629b350a75e24abb72 is 1080, key is row0003/info:/1732041093344/Put/seqid=0 2024-11-19T18:31:33,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741840_1016 (size=6033) 2024-11-19T18:31:33,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741840_1016 (size=6033) 2024-11-19T18:31:33,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:33,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:33,922 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad49fd75eb934f629b350a75e24abb72 2024-11-19T18:31:33,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad49fd75eb934f629b350a75e24abb72 as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72 2024-11-19T18:31:33,934 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72, entries=1, sequenceid=13, filesize=5.9 K 2024-11-19T18:31:33,935 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 424ms, sequenceid=13, compaction requested=true 2024-11-19T18:31:33,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: 2024-11-19T18:31:33,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:33,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-19T18:31:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-19T18:31:33,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-19T18:31:33,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 579 msec 2024-11-19T18:31:33,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 587 msec 2024-11-19T18:31:34,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:34,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:35,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:35,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:36,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:36,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:37,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:37,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:38,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:38,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:39,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:39,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:40,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:40,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:41,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:41,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:42,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-19T18:31:42,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:43,135 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T18:31:43,135 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T18:31:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T18:31:43,393 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T18:31:43,393 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:31:43,394 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:31:43,394 DEBUG [Time-limited test {}] regionserver.HStore(1541): 6f21dcf66b69b19f8fbfbfc2ac1cac63/info is initiating minor compaction (all files) 2024-11-19T18:31:43,394 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:31:43,394 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:43,394 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 6f21dcf66b69b19f8fbfbfc2ac1cac63/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:43,395 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72] into tmpdir=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp, totalSize=17.7 K 2024-11-19T18:31:43,395 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 60fa1218171948b3a188d6bb0e6b079d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732041073169 2024-11-19T18:31:43,395 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5da2bdfbb84143398f9ea74563e6e97a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732041083254 2024-11-19T18:31:43,396 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ad49fd75eb934f629b350a75e24abb72, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732041093344 2024-11-19T18:31:43,406 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 6f21dcf66b69b19f8fbfbfc2ac1cac63#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:31:43,407 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/10d6c96a4b434d6db417f9e691008e72 is 1080, key is row0001/info:/1732041073169/Put/seqid=0 2024-11-19T18:31:43,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741841_1017 (size=8296) 2024-11-19T18:31:43,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741841_1017 (size=8296) 2024-11-19T18:31:43,418 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/10d6c96a4b434d6db417f9e691008e72 as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/10d6c96a4b434d6db417f9e691008e72 2024-11-19T18:31:43,425 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6f21dcf66b69b19f8fbfbfc2ac1cac63/info of 6f21dcf66b69b19f8fbfbfc2ac1cac63 into 10d6c96a4b434d6db417f9e691008e72(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T18:31:43,425 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: 2024-11-19T18:31:43,428 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C38405%2C1732041062143.1732041103428 2024-11-19T18:31:43,434 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:43,434 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:43,434 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:43,434 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:43,434 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:43,434 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041093345 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041103428 2024-11-19T18:31:43,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741839_1015 (size=2520) 2024-11-19T18:31:43,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741839_1015 (size=2520) 2024-11-19T18:31:43,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46843:46843),(127.0.0.1/127.0.0.1:42521:42521)] 2024-11-19T18:31:43,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041093345 is not closed yet, will try archiving it next time 2024-11-19T18:31:43,440 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041062524 to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs/30db5f576be8%2C38405%2C1732041062143.1732041062524 2024-11-19T18:31:43,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:31:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T18:31:43,444 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T18:31:43,445 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T18:31:43,445 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T18:31:43,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-19T18:31:43,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:43,598 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T18:31:43,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/d052c662340e4913946d7919e8229b7f is 1080, key is row0000/info:/1732041103427/Put/seqid=0 2024-11-19T18:31:43,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741843_1019 (size=6033) 2024-11-19T18:31:43,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741843_1019 (size=6033) 2024-11-19T18:31:43,608 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/d052c662340e4913946d7919e8229b7f 2024-11-19T18:31:43,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/d052c662340e4913946d7919e8229b7f as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/d052c662340e4913946d7919e8229b7f 2024-11-19T18:31:43,618 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/d052c662340e4913946d7919e8229b7f, entries=1, sequenceid=18, filesize=5.9 K 2024-11-19T18:31:43,619 INFO [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 21ms, sequenceid=18, compaction requested=false 2024-11-19T18:31:43,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: 2024-11-19T18:31:43,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
2024-11-19T18:31:43,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-19T18:31:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-19T18:31:43,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-19T18:31:43,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-19T18:31:43,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-19T18:31:43,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:43,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:44,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:44,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:45,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:45,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:46,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:46,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:47,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:47,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:48,411 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6f21dcf66b69b19f8fbfbfc2ac1cac63, had cached 0 bytes from a total of 14329 2024-11-19T18:31:48,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:48,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:49,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:49,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:50,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:50,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:51,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:51,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:52,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:52,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:31:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T18:31:53,512 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T18:31:53,515 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C38405%2C1732041062143.1732041113515 2024-11-19T18:31:53,521 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,521 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,521 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,521 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,522 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041103428 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041113515 2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42521:42521),(127.0.0.1/127.0.0.1:46843:46843)] 2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041103428 is not closed yet, will try archiving it next time 2024-11-19T18:31:53,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:31:53,523 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/WALs/30db5f576be8,38405,1732041062143/30db5f576be8%2C38405%2C1732041062143.1732041093345 to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs/30db5f576be8%2C38405%2C1732041062143.1732041093345 2024-11-19T18:31:53,523 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:53,523 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
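The call stack above shows where this shutdown is initiated: the JUnit @After method of AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (hence the "Connection has been closed" DEBUG entries). A condensed, assumed form of that tearDown, with our own class name and field, is sketched below; it is an illustration of the pattern the stack trace records, not the project's actual test code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Condensed sketch (assumed names) of the tearDown seen in the call stack above.
public class LogRollingTearDownSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared async cluster connection, stops the master and region
    // servers, then shuts down the mini DFS and ZooKeeper clusters, producing
    // the shutdown sequence logged below.
    TEST_UTIL.shutdownMiniCluster();
  }
}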
2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:31:53,523 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1971934903, stopped=false 2024-11-19T18:31:53,524 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,39295,1732041062094 2024-11-19T18:31:53,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741842_1018 (size=2026) 2024-11-19T18:31:53,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741842_1018 (size=2026) 2024-11-19T18:31:53,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:53,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:53,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:53,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:53,525 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:31:53,525 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T18:31:53,526 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:31:53,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:53,526 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,38405,1732041062143' ***** 2024-11-19T18:31:53,526 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:31:53,526 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:31:53,526 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:53,526 INFO [RS:0;30db5f576be8:38405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:31:53,526 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:31:53,526 INFO [RS:0;30db5f576be8:38405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:31:53,526 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(3091): Received CLOSE for 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:53,527 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,38405,1732041062143 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:38405. 2024-11-19T18:31:53,527 DEBUG [RS:0;30db5f576be8:38405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:31:53,527 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6f21dcf66b69b19f8fbfbfc2ac1cac63, disabling compactions & flushes 2024-11-19T18:31:53,527 DEBUG [RS:0;30db5f576be8:38405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:53,527 INFO 
[RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:53,527 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:31:53,527 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. after waiting 0 ms 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T18:31:53,527 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:31:53,527 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T18:31:53,527 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T18:31:53,527 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1325): Online Regions={6f21dcf66b69b19f8fbfbfc2ac1cac63=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T18:31:53,528 DEBUG [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6f21dcf66b69b19f8fbfbfc2ac1cac63 2024-11-19T18:31:53,528 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:31:53,528 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:31:53,528 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:31:53,528 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:31:53,528 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:31:53,528 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-19T18:31:53,531 DEBUG 
[RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad753fc9e7e84297af41a009ecf3fab7 is 1080, key is row0001/info:/1732041113514/Put/seqid=0 2024-11-19T18:31:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741845_1021 (size=6033) 2024-11-19T18:31:53,537 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad753fc9e7e84297af41a009ecf3fab7 2024-11-19T18:31:53,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741845_1021 (size=6033) 2024-11-19T18:31:53,543 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/.tmp/info/ad753fc9e7e84297af41a009ecf3fab7 as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad753fc9e7e84297af41a009ecf3fab7 2024-11-19T18:31:53,545 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/info/54a6856d4b564b8c806ca8e59ef331ef is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63./info:regioninfo/1732041063426/Put/seqid=0 2024-11-19T18:31:53,549 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad753fc9e7e84297af41a009ecf3fab7, entries=1, sequenceid=22, filesize=5.9 K 2024-11-19T18:31:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741846_1022 (size=7308) 2024-11-19T18:31:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741846_1022 (size=7308) 2024-11-19T18:31:53,551 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 23ms, sequenceid=22, compaction requested=true 2024-11-19T18:31:53,551 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at 
sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/info/54a6856d4b564b8c806ca8e59ef331ef 2024-11-19T18:31:53,551 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72] to archive 2024-11-19T18:31:53,552 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T18:31:53,554 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/60fa1218171948b3a188d6bb0e6b079d 2024-11-19T18:31:53,555 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/5da2bdfbb84143398f9ea74563e6e97a 2024-11-19T18:31:53,556 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72 to hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/info/ad49fd75eb934f629b350a75e24abb72 2024-11-19T18:31:53,557 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30db5f576be8:39295 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-19T18:31:53,557 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [60fa1218171948b3a188d6bb0e6b079d=6033, 5da2bdfbb84143398f9ea74563e6e97a=6033, ad49fd75eb934f629b350a75e24abb72=6033] 2024-11-19T18:31:53,561 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6f21dcf66b69b19f8fbfbfc2ac1cac63/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-19T18:31:53,561 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 2024-11-19T18:31:53,561 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6f21dcf66b69b19f8fbfbfc2ac1cac63: Waiting for close lock at 1732041113527Running coprocessor pre-close hooks at 1732041113527Disabling compacts and flushes for region at 1732041113527Disabling writes for close at 1732041113527Obtaining lock to block concurrent updates at 1732041113527Preparing flush snapshotting stores in 6f21dcf66b69b19f8fbfbfc2ac1cac63 at 1732041113527Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732041113528 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. at 1732041113528Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63/info: creating writer at 1732041113529 (+1 ms)Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63/info: appending metadata at 1732041113531 (+2 ms)Flushing 6f21dcf66b69b19f8fbfbfc2ac1cac63/info: closing flushed file at 1732041113531Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a9b94ee: reopening flushed file at 1732041113543 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6f21dcf66b69b19f8fbfbfc2ac1cac63 in 23ms, sequenceid=22, compaction requested=true at 1732041113551 (+8 ms)Writing region close event to WAL at 1732041113557 (+6 ms)Running coprocessor post-close hooks at 1732041113561 (+4 ms)Closed at 1732041113561 2024-11-19T18:31:53,562 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732041063072.6f21dcf66b69b19f8fbfbfc2ac1cac63. 
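The close of region 6f21dcf66b69b19f8fbfbfc2ac1cac63 above follows the usual flush life cycle: the memstore snapshot is written to an HFile under the store's .tmp directory, the file is committed by renaming it into the info/ directory ("Committing ... as ..."), and the now-redundant compacted files are moved to the archive directory. The following is a minimal sketch of the tmp-then-rename commit step using plain Hadoop FileSystem calls; the helper name and paths are illustrative, not the HBase internals.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the ".tmp then rename" commit pattern shown by the
// "Committing .../.tmp/info/<file> as .../info/<file>" DEBUG lines above.
final class CommitFlushedFileSketch {
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // The HFile was fully written under .tmp first; the rename is a metadata-only
    // operation on HDFS that makes it visible under the store directory in one step.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}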
2024-11-19T18:31:53,572 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/ns/70180b0bf29744a3b2011ca01ea984cc is 43, key is default/ns:d/1732041062950/Put/seqid=0 2024-11-19T18:31:53,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741847_1023 (size=5153) 2024-11-19T18:31:53,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741847_1023 (size=5153) 2024-11-19T18:31:53,578 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/ns/70180b0bf29744a3b2011ca01ea984cc 2024-11-19T18:31:53,597 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/table/34f45ef007ef4e739cbddee17b0f4c0f is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732041063435/Put/seqid=0 2024-11-19T18:31:53,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741848_1024 (size=5508) 2024-11-19T18:31:53,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741848_1024 (size=5508) 2024-11-19T18:31:53,602 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/table/34f45ef007ef4e739cbddee17b0f4c0f 2024-11-19T18:31:53,607 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/info/54a6856d4b564b8c806ca8e59ef331ef as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/info/54a6856d4b564b8c806ca8e59ef331ef 2024-11-19T18:31:53,612 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/info/54a6856d4b564b8c806ca8e59ef331ef, entries=10, sequenceid=11, filesize=7.1 K 2024-11-19T18:31:53,613 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/ns/70180b0bf29744a3b2011ca01ea984cc as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/ns/70180b0bf29744a3b2011ca01ea984cc 2024-11-19T18:31:53,618 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/ns/70180b0bf29744a3b2011ca01ea984cc, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T18:31:53,619 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/.tmp/table/34f45ef007ef4e739cbddee17b0f4c0f as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/table/34f45ef007ef4e739cbddee17b0f4c0f 2024-11-19T18:31:53,625 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/table/34f45ef007ef4e739cbddee17b0f4c0f, entries=2, sequenceid=11, filesize=5.4 K 2024-11-19T18:31:53,626 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-19T18:31:53,631 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T18:31:53,632 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:31:53,632 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:31:53,632 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041113527Running coprocessor pre-close hooks at 1732041113527Disabling compacts and flushes for region at 1732041113527Disabling writes for close at 1732041113528 (+1 ms)Obtaining lock to block concurrent updates at 1732041113528Preparing flush snapshotting stores in 1588230740 at 1732041113528Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732041113528Flushing stores of hbase:meta,,1.1588230740 at 1732041113529 (+1 ms)Flushing 1588230740/info: creating writer at 1732041113529Flushing 1588230740/info: appending metadata at 1732041113545 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732041113545Flushing 1588230740/ns: creating writer at 1732041113557 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732041113572 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732041113572Flushing 1588230740/table: creating writer at 1732041113582 (+10 ms)Flushing 1588230740/table: appending metadata at 1732041113596 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732041113596Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24c330dc: reopening flushed file at 1732041113607 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3264d325: reopening flushed file at 1732041113612 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67ce5f7c: reopening flushed file at 1732041113618 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1732041113626 (+8 ms)Writing region close event to WAL at 1732041113628 (+2 ms)Running coprocessor post-close hooks at 1732041113632 (+4 ms)Closed at 1732041113632 2024-11-19T18:31:53,632 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:31:53,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:53,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:53,728 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,38405,1732041062143; all regions closed. 
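The repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from the WAL-close path polling whether the old WAL file has been closed on HDFS. The poll is made through reflection (hence the InvocationTargetException wrapper) and keeps failing here because the test has already shut down the DFSClient behind that filesystem, so the warnings are retry-loop noise rather than data loss. A minimal illustrative sketch of such a reflective probe follows, with our own class and method names; the real logic lives in RecoverLeaseFSUtils.isFileClosed.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of a reflective isFileClosed probe like the one recorded above.
final class IsFileClosedProbe {
  static boolean probe(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) is looked up reflectively so the caller also works against
      // filesystem implementations that do not expose the method.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      // When the underlying DFSClient is already closed, the target method throws
      // IOException("Filesystem closed"), surfacing here wrapped in
      // InvocationTargetException -- the exception pair seen in the log.
      return false;
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }
}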
2024-11-19T18:31:53,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741834_1010 (size=3306) 2024-11-19T18:31:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741834_1010 (size=3306) 2024-11-19T18:31:53,733 DEBUG [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs 2024-11-19T18:31:53,733 INFO [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C38405%2C1732041062143.meta:.meta(num 1732041062904) 2024-11-19T18:31:53,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,734 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741844_1020 (size=1252) 2024-11-19T18:31:53,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741844_1020 (size=1252) 2024-11-19T18:31:53,738 DEBUG [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/oldWALs 2024-11-19T18:31:53,738 INFO [RS:0;30db5f576be8:38405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C38405%2C1732041062143:(num 1732041113515) 2024-11-19T18:31:53,738 DEBUG [RS:0;30db5f576be8:38405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:53,738 INFO [RS:0;30db5f576be8:38405 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:31:53,738 INFO [RS:0;30db5f576be8:38405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:31:53,738 INFO [RS:0;30db5f576be8:38405 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T18:31:53,739 INFO [RS:0;30db5f576be8:38405 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:31:53,739 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:31:53,739 INFO [RS:0;30db5f576be8:38405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38405 2024-11-19T18:31:53,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,38405,1732041062143 2024-11-19T18:31:53,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:31:53,741 INFO [RS:0;30db5f576be8:38405 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:31:53,741 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,38405,1732041062143] 2024-11-19T18:31:53,744 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,38405,1732041062143 already deleted, retry=false 2024-11-19T18:31:53,744 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,38405,1732041062143 expired; onlineServers=0 2024-11-19T18:31:53,744 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,39295,1732041062094' ***** 2024-11-19T18:31:53,744 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:31:53,744 INFO [M:0;30db5f576be8:39295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:31:53,744 INFO [M:0;30db5f576be8:39295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:31:53,744 DEBUG [M:0;30db5f576be8:39295 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:31:53,744 DEBUG [M:0;30db5f576be8:39295 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:31:53,744 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T18:31:53,744 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041062304 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041062304,5,FailOnTimeoutGroup] 2024-11-19T18:31:53,744 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041062306 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041062306,5,FailOnTimeoutGroup] 2024-11-19T18:31:53,745 INFO [M:0;30db5f576be8:39295 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:31:53,745 INFO [M:0;30db5f576be8:39295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:31:53,745 DEBUG [M:0;30db5f576be8:39295 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:31:53,745 INFO [M:0;30db5f576be8:39295 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:31:53,745 INFO [M:0;30db5f576be8:39295 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:31:53,745 INFO [M:0;30db5f576be8:39295 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:31:53,745 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:31:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:31:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:53,746 DEBUG [M:0;30db5f576be8:39295 {}] zookeeper.ZKUtil(347): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:31:53,746 WARN [M:0;30db5f576be8:39295 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:31:53,747 INFO [M:0;30db5f576be8:39295 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/.lastflushedseqids 2024-11-19T18:31:53,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741849_1025 (size=130) 2024-11-19T18:31:53,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741849_1025 (size=130) 2024-11-19T18:31:53,752 INFO [M:0;30db5f576be8:39295 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:31:53,752 INFO [M:0;30db5f576be8:39295 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:31:53,752 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:31:53,752 INFO [M:0;30db5f576be8:39295 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:53,753 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:53,753 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:31:53,753 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:53,753 INFO [M:0;30db5f576be8:39295 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-19T18:31:53,768 DEBUG [M:0;30db5f576be8:39295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1869628305fe422ab9fa9e7f9a58726b is 82, key is hbase:meta,,1/info:regioninfo/1732041062927/Put/seqid=0 2024-11-19T18:31:53,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741850_1026 (size=5672) 2024-11-19T18:31:53,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741850_1026 (size=5672) 2024-11-19T18:31:53,774 INFO [M:0;30db5f576be8:39295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1869628305fe422ab9fa9e7f9a58726b 2024-11-19T18:31:53,793 DEBUG [M:0;30db5f576be8:39295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6bd38aca034ec18fe537f47c7fb78c is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732041063439/Put/seqid=0 2024-11-19T18:31:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741851_1027 (size=7818) 2024-11-19T18:31:53,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741851_1027 (size=7818) 2024-11-19T18:31:53,798 INFO [M:0;30db5f576be8:39295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6bd38aca034ec18fe537f47c7fb78c 2024-11-19T18:31:53,802 INFO [M:0;30db5f576be8:39295 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec6bd38aca034ec18fe537f47c7fb78c 2024-11-19T18:31:53,817 DEBUG [M:0;30db5f576be8:39295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d236fa589a5b4f5b98a46ea507ce7951 is 69, key is 30db5f576be8,38405,1732041062143/rs:state/1732041062377/Put/seqid=0 
2024-11-19T18:31:53,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741852_1028 (size=5156) 2024-11-19T18:31:53,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741852_1028 (size=5156) 2024-11-19T18:31:53,822 INFO [M:0;30db5f576be8:39295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d236fa589a5b4f5b98a46ea507ce7951 2024-11-19T18:31:53,842 DEBUG [M:0;30db5f576be8:39295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d039b8b73ef472c83e4c00133cf079e is 52, key is load_balancer_on/state:d/1732041063069/Put/seqid=0 2024-11-19T18:31:53,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:53,842 INFO [RS:0;30db5f576be8:38405 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:31:53,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38405-0x101317f21500001, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:53,842 INFO [RS:0;30db5f576be8:38405 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,38405,1732041062143; zookeeper connection closed. 
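The ZKWatcher entries above show each server receiving a final event with type=None, state=Closed as its ZooKeeper session is torn down during shutdown. Below is a minimal sketch of watching such connection-state events with the stock ZooKeeper client; the quorum address and timeout are hypothetical, and the timed await is only a guard for the sketch.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkCloseWatch {
      public static void main(String[] args) throws Exception {
        CountDownLatch closed = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent e) -> {
          // Connection-state changes arrive with type=None; node events also carry a path.
          System.out.println("type=" + e.getType() + ", state=" + e.getState() + ", path=" + e.getPath());
          if (e.getState() == Watcher.Event.KeeperState.Closed) {
            closed.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher); // hypothetical quorum
        zk.close();                        // the client reports a final type=None, state=Closed event
        closed.await(5, TimeUnit.SECONDS); // do not hang the sketch if the event is not seen
      }
    }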
2024-11-19T18:31:53,843 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2fdba6ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2fdba6ee 2024-11-19T18:31:53,843 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:31:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741853_1029 (size=5056) 2024-11-19T18:31:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741853_1029 (size=5056) 2024-11-19T18:31:53,847 INFO [M:0;30db5f576be8:39295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d039b8b73ef472c83e4c00133cf079e 2024-11-19T18:31:53,853 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1869628305fe422ab9fa9e7f9a58726b as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1869628305fe422ab9fa9e7f9a58726b 2024-11-19T18:31:53,858 INFO [M:0;30db5f576be8:39295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1869628305fe422ab9fa9e7f9a58726b, entries=8, sequenceid=121, filesize=5.5 K 2024-11-19T18:31:53,858 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6bd38aca034ec18fe537f47c7fb78c as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec6bd38aca034ec18fe537f47c7fb78c 2024-11-19T18:31:53,863 INFO [M:0;30db5f576be8:39295 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec6bd38aca034ec18fe537f47c7fb78c 2024-11-19T18:31:53,863 INFO [M:0;30db5f576be8:39295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec6bd38aca034ec18fe537f47c7fb78c, entries=14, sequenceid=121, filesize=7.6 K 2024-11-19T18:31:53,864 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d236fa589a5b4f5b98a46ea507ce7951 as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d236fa589a5b4f5b98a46ea507ce7951 2024-11-19T18:31:53,868 INFO [M:0;30db5f576be8:39295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d236fa589a5b4f5b98a46ea507ce7951, entries=1, sequenceid=121, filesize=5.0 K 2024-11-19T18:31:53,869 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d039b8b73ef472c83e4c00133cf079e as hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3d039b8b73ef472c83e4c00133cf079e 2024-11-19T18:31:53,873 INFO [M:0;30db5f576be8:39295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34875/user/jenkins/test-data/50b8a656-a3f1-bc7c-971e-9076270dee6f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3d039b8b73ef472c83e4c00133cf079e, entries=1, sequenceid=121, filesize=4.9 K 2024-11-19T18:31:53,874 INFO [M:0;30db5f576be8:39295 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=121, compaction requested=false 2024-11-19T18:31:53,876 INFO [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:53,876 DEBUG [M:0;30db5f576be8:39295 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041113752Disabling compacts and flushes for region at 1732041113752Disabling writes for close at 1732041113753 (+1 ms)Obtaining lock to block concurrent updates at 1732041113753Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732041113753Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1732041113753Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732041113754 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732041113754Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732041113768 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732041113768Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732041113778 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732041113793 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732041113793Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732041113803 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732041113817 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732041113817Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732041113827 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732041113841 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732041113841Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d7ca37a: reopening flushed file at 1732041113852 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@565da96d: reopening flushed file at 1732041113858 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3643a840: reopening flushed file at 1732041113863 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7aa0f199: reopening flushed file at 1732041113868 (+5 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=121, compaction requested=false at 1732041113874 (+6 ms)Writing region close event to WAL at 1732041113875 (+1 ms)Closed at 1732041113875 2024-11-19T18:31:53,876 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,876 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,876 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,876 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,876 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:31:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34981 is added to blk_1073741830_1006 (size=52987) 2024-11-19T18:31:53,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41887 is added to blk_1073741830_1006 (size=52987) 2024-11-19T18:31:53,879 INFO [M:0;30db5f576be8:39295 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T18:31:53,879 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
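The close journal above makes the two-step flush pattern easy to read off the paths: each column family snapshot is first written to a file under .tmp/ (the "creating writer" / "closing flushed file" steps) and only then committed by moving it into the family directory (the "Committing ... .tmp/info/... as .../info/..." lines). A minimal sketch of that write-then-rename pattern with the plain Hadoop FileSystem API follows; the paths and contents are made up for illustration, and a reachable fs.defaultFS is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();          // picks up fs.defaultFS
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/demo/store/.tmp/flush-0");   // hypothetical temporary output
        Path dst = new Path("/demo/store/info/flush-0");   // hypothetical committed location

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes("flushed cells would go here");   // stand-in for the HFile bytes
        }
        fs.mkdirs(dst.getParent());
        // The commit is a rename: readers only look outside .tmp, so they never
        // observe a half-written file in the family directory.
        boolean committed = fs.rename(tmp, dst);
        System.out.println("committed=" + committed);
      }
    }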
2024-11-19T18:31:53,879 INFO [M:0;30db5f576be8:39295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39295 2024-11-19T18:31:53,879 INFO [M:0;30db5f576be8:39295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:31:53,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:53,981 INFO [M:0;30db5f576be8:39295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:31:53,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39295-0x101317f21500000, quorum=127.0.0.1:51554, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:31:53,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8afa355{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:53,984 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39212263{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:53,984 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:53,984 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7101d128{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:53,984 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@374dfdaf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:53,986 WARN [BP-764291916-172.17.0.2-1732041061417 heartbeating to localhost/127.0.0.1:34875 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:31:53,986 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
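The "Stopped ServerConnector..." and "Stopped o.e.j.s.ServletContextHandler..." entries above come from the embedded Jetty servers that serve the datanode and HDFS web UIs being shut down by the test. A minimal embedded-Jetty lifecycle sketch in the same 9.4 style is shown below; the handler is a placeholder, not what the minicluster actually mounts.

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.handler.DefaultHandler;

    public class JettyLifecycle {
      public static void main(String[] args) throws Exception {
        Server server = new Server(0);             // port 0 = ephemeral, like the test's {localhost:0}
        server.setHandler(new DefaultHandler());   // placeholder handler
        server.start();
        System.out.println("started=" + server.isStarted());
        server.stop();                             // stopping is what logs the Stopped ... lines
        server.join();                             // wait for the server threads to exit
      }
    }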
2024-11-19T18:31:53,986 WARN [BP-764291916-172.17.0.2-1732041061417 heartbeating to localhost/127.0.0.1:34875 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-764291916-172.17.0.2-1732041061417 (Datanode Uuid 46adfa70-b3b2-45a1-918f-6cc21495ecef) service to localhost/127.0.0.1:34875 2024-11-19T18:31:53,986 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:31:53,987 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data3/current/BP-764291916-172.17.0.2-1732041061417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:53,987 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data4/current/BP-764291916-172.17.0.2-1732041061417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:53,987 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:31:53,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f08894b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:53,989 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ac0122b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:53,989 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:53,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@521506bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:53,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68744dfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:53,990 WARN [BP-764291916-172.17.0.2-1732041061417 heartbeating to localhost/127.0.0.1:34875 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:31:53,991 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
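The CachingGetSpaceUsed$RefreshThread warnings above ("Thread Interrupted waiting to refresh disk information: sleep interrupted") show the periodic disk-usage refresher being interrupted during datanode shutdown, which is how this kind of sleep-and-refresh loop is typically stopped. A generic sketch of that interrupt-to-stop pattern in plain Java follows; the refresh body and intervals are placeholders.

    public class RefreshLoop {
      public static void main(String[] args) throws Exception {
        Thread refresher = new Thread(() -> {
          while (!Thread.currentThread().isInterrupted()) {
            System.out.println("refreshing disk usage (placeholder)");
            try {
              Thread.sleep(1_000);                // wait for the next refresh interval
            } catch (InterruptedException e) {
              // Shutdown path, mirroring the WARN lines above: note it and stop.
              System.out.println("interrupted while waiting to refresh, exiting");
              Thread.currentThread().interrupt(); // restore the flag so the loop exits
            }
          }
        }, "refreshUsed-demo");

        refresher.start();
        Thread.sleep(2_500);      // let it run a couple of cycles
        refresher.interrupt();    // what shutdown does to the real refresher
        refresher.join();
      }
    }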
2024-11-19T18:31:53,991 WARN [BP-764291916-172.17.0.2-1732041061417 heartbeating to localhost/127.0.0.1:34875 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-764291916-172.17.0.2-1732041061417 (Datanode Uuid ed0b9b24-7ca1-4733-ac59-6960c903d193) service to localhost/127.0.0.1:34875 2024-11-19T18:31:53,991 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:31:53,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data1/current/BP-764291916-172.17.0.2-1732041061417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:53,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/cluster_f23ddb7c-97ba-18de-ed96-3257160edcbd/data/data2/current/BP-764291916-172.17.0.2-1732041061417 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:31:53,991 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:31:54,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@520bb90c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:31:54,002 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79c156a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:31:54,002 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:31:54,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56673fe9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:31:54,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1159c3f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir/,STOPPED} 2024-11-19T18:31:54,008 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:31:54,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:31:54,035 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34875 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34875 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34875 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34875 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34875 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/30db5f576be8:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34875 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34875 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34875 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=59 (was 123), ProcessCount=11 (was 11), AvailableMemoryMB=6751 (was 6939) 2024-11-19T18:31:54,043 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=59, ProcessCount=11, AvailableMemoryMB=6751 2024-11-19T18:31:54,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.log.dir so I do NOT create it in target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5e12cf77-a42c-298c-5d78-3bb4ad6c44e8/hadoop.tmp.dir so I do NOT create it in target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a, deleteOnExit=true 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/test.cache.data in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:31:54,044 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:31:54,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:31:54,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:31:54,058 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:31:54,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:54,130 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:54,131 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:54,131 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:54,132 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:31:54,132 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:54,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59c539e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:54,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78d2a49d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:54,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63d9f0e2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/java.io.tmpdir/jetty-localhost-36349-hadoop-hdfs-3_4_1-tests_jar-_-any-14601404124136508531/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:31:54,248 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38dc0fd7{HTTP/1.1, (http/1.1)}{localhost:36349} 2024-11-19T18:31:54,248 INFO [Time-limited test {}] server.Server(415): Started @237630ms 2024-11-19T18:31:54,261 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:31:54,330 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:54,333 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:54,334 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:54,334 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:54,334 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:31:54,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45890504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:54,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@459363d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:54,392 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:31:54,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ecf816b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/java.io.tmpdir/jetty-localhost-46109-hadoop-hdfs-3_4_1-tests_jar-_-any-15974023422716571041/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:31:54,449 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5411f427{HTTP/1.1, (http/1.1)}{localhost:46109} 2024-11-19T18:31:54,449 INFO [Time-limited test {}] server.Server(415): Started @237831ms 2024-11-19T18:31:54,450 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:31:54,479 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:31:54,481 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:31:54,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:31:54,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:31:54,482 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:31:54,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9612b29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:31:54,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51585bde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:31:54,549 WARN [Thread-1948 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data1/current/BP-21126910-172.17.0.2-1732041114065/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:54,549 WARN [Thread-1949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data2/current/BP-21126910-172.17.0.2-1732041114065/current, will proceed with Du for space computation calculation, 2024-11-19T18:31:54,566 WARN [Thread-1927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1
2024-11-19T18:31:54,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf5f2a41c1c31517 with lease ID 0xdf893f774111073a: Processing first storage report for DS-658e6bce-3b4d-4f87-8e18-7efe88e0a7ab from datanode DatanodeRegistration(127.0.0.1:42347, datanodeUuid=889d51b1-b22e-4e1e-8174-e18adb36ee9c, infoPort=35375, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065)
2024-11-19T18:31:54,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf5f2a41c1c31517 with lease ID 0xdf893f774111073a: from storage DS-658e6bce-3b4d-4f87-8e18-7efe88e0a7ab node DatanodeRegistration(127.0.0.1:42347, datanodeUuid=889d51b1-b22e-4e1e-8174-e18adb36ee9c, infoPort=35375, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T18:31:54,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf5f2a41c1c31517 with lease ID 0xdf893f774111073a: Processing first storage report for DS-51501995-6cc3-438e-b386-9c72811cc41f from datanode DatanodeRegistration(127.0.0.1:42347, datanodeUuid=889d51b1-b22e-4e1e-8174-e18adb36ee9c, infoPort=35375, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065)
2024-11-19T18:31:54,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf5f2a41c1c31517 with lease ID 0xdf893f774111073a: from storage DS-51501995-6cc3-438e-b386-9c72811cc41f node DatanodeRegistration(127.0.0.1:42347, datanodeUuid=889d51b1-b22e-4e1e-8174-e18adb36ee9c, infoPort=35375, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T18:31:54,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41a74ab6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/java.io.tmpdir/jetty-localhost-44407-hadoop-hdfs-3_4_1-tests_jar-_-any-15250251673875196533/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T18:31:54,600 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d7cc900{HTTP/1.1, (http/1.1)}{localhost:44407}
2024-11-19T18:31:54,600 INFO [Time-limited test {}] server.Server(415): Started @237982ms
2024-11-19T18:31:54,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-19T18:31:54,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T18:31:54,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T18:31:54,701 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data3/current/BP-21126910-172.17.0.2-1732041114065/current, will proceed with Du for space computation calculation,
2024-11-19T18:31:54,701 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data4/current/BP-21126910-172.17.0.2-1732041114065/current, will proceed with Du for space computation calculation,
2024-11-19T18:31:54,718 WARN [Thread-1963 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-19T18:31:54,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf15cd0c125f3846 with lease ID 0xdf893f774111073b: Processing first storage report for DS-efdef1bc-ec0f-45f8-b7fa-f2423b607daa from datanode DatanodeRegistration(127.0.0.1:35377, datanodeUuid=0a560c69-43c3-4ced-913d-4f23635c8c31, infoPort=43735, infoSecurePort=0, ipcPort=34515, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065)
2024-11-19T18:31:54,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf15cd0c125f3846 with lease ID 0xdf893f774111073b: from storage DS-efdef1bc-ec0f-45f8-b7fa-f2423b607daa node DatanodeRegistration(127.0.0.1:35377, datanodeUuid=0a560c69-43c3-4ced-913d-4f23635c8c31, infoPort=43735, infoSecurePort=0, ipcPort=34515, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T18:31:54,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf15cd0c125f3846 with lease ID 0xdf893f774111073b: Processing first storage report for DS-7340e01d-54a8-4bd6-b5a5-b16eee20e70a from datanode DatanodeRegistration(127.0.0.1:35377, datanodeUuid=0a560c69-43c3-4ced-913d-4f23635c8c31, infoPort=43735, infoSecurePort=0, ipcPort=34515, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065)
2024-11-19T18:31:54,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf15cd0c125f3846 with lease ID 0xdf893f774111073b: from storage DS-7340e01d-54a8-4bd6-b5a5-b16eee20e70a node DatanodeRegistration(127.0.0.1:35377, datanodeUuid=0a560c69-43c3-4ced-913d-4f23635c8c31, infoPort=43735, infoSecurePort=0, ipcPort=34515, storageInfo=lv=-57;cid=testClusterID;nsid=1040943466;c=1732041114065), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T18:31:54,723 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9 2024-11-19T18:31:54,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/zookeeper_0, clientPort=63904, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:31:54,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63904 2024-11-19T18:31:54,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:31:54,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:31:54,737 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165 with version=8 2024-11-19T18:31:54,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:31:54,739 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:31:54,739 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:31:54,740 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44151 2024-11-19T18:31:54,741 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44151 connecting to ZooKeeper ensemble=127.0.0.1:63904 2024-11-19T18:31:54,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441510x0, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:31:54,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44151-0x101317feef50000 connected 2024-11-19T18:31:54,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,769 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:54,770 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165, hbase.cluster.distributed=false 2024-11-19T18:31:54,772 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:31:54,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44151 2024-11-19T18:31:54,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44151 2024-11-19T18:31:54,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44151 2024-11-19T18:31:54,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44151 2024-11-19T18:31:54,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44151 2024-11-19T18:31:54,788 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:31:54,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:31:54,789 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33931 2024-11-19T18:31:54,790 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33931 connecting to ZooKeeper ensemble=127.0.0.1:63904 2024-11-19T18:31:54,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339310x0, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:31:54,796 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339310x0, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:31:54,796 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33931-0x101317feef50001 connected 2024-11-19T18:31:54,796 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:31:54,797 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:31:54,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T18:31:54,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:31:54,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33931 2024-11-19T18:31:54,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33931 2024-11-19T18:31:54,799 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33931 2024-11-19T18:31:54,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33931 2024-11-19T18:31:54,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33931 2024-11-19T18:31:54,811 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:44151 2024-11-19T18:31:54,812 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:54,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:54,814 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:31:54,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,815 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:31:54,816 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,44151,1732041114739 from backup master directory 2024-11-19T18:31:54,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:54,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:31:54,819 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:31:54,819 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,823 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/hbase.id] with ID: f8f4bb5f-2eea-42a9-8dcf-c2ca562e287a 2024-11-19T18:31:54,823 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/.tmp/hbase.id 2024-11-19T18:31:54,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:31:54,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:31:54,828 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/.tmp/hbase.id]:[hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/hbase.id] 2024-11-19T18:31:54,838 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:54,838 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:31:54,840 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
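[editor's note] The records above appear to come from HBaseTestingUtil standing up a single-process test cluster: a mini DFS, a MiniZooKeeperCluster on the client port shown, then a master and a region server binding their Netty RPC endpoints before the active master registers itself and writes the cluster ID file. As a rough, hedged sketch only (not code from this test), driver code of the sort that produces such startup records might look like the following; the class name, the "sketch" table, and the "info" family are hypothetical, and the API is assumed to be the stock HBase 3.x test utility.

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Table;

  public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
      // Starts a mini DFS, a mini ZooKeeper quorum, one master and one
      // region server in-process, similar to the startup logged above.
      HBaseTestingUtil util = new HBaseTestingUtil();
      util.startMiniCluster();
      try {
        // Hypothetical table, used here only to show the utility in action.
        Table table = util.createTable(TableName.valueOf("sketch"), "info");
        table.close();
      } finally {
        // Tears the whole mini cluster back down.
        util.shutdownMiniCluster();
      }
    }
  }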
2024-11-19T18:31:54,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:31:54,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:31:54,852 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:31:54,853 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:31:54,854 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:54,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:31:54,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:31:54,863 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store 2024-11-19T18:31:54,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:31:54,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:31:54,869 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:31:54,869 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
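[editor's note] The long {NAME => 'info', ...} dumps above are HBase column-family descriptors rendered as strings while the master creates its local 'master:store' region. For orientation only, a hedged sketch of how an equivalent descriptor is assembled with the public client API follows; the table name "demo" is hypothetical, and the values simply mirror a few of the 'info' family attributes printed above.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DescriptorSketch {
    public static void main(String[] args) {
      // Mirrors the 'info' family settings logged above: 3 versions, ROWCOL
      // bloom filter, ROW_INDEX_V1 block encoding, 8 KB blocks, in-memory.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBlocksize(8192)
              .setInMemory(true)
              .build())
          .build();
      // Printing the descriptor yields the same string form seen in the log.
      System.out.println(desc);
    }
  }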
2024-11-19T18:31:54,869 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041114869Disabling compacts and flushes for region at 1732041114869Disabling writes for close at 1732041114869Writing region close event to WAL at 1732041114869Closed at 1732041114869 2024-11-19T18:31:54,870 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/.initializing 2024-11-19T18:31:54,870 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/WALs/30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,872 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C44151%2C1732041114739, suffix=, logDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/WALs/30db5f576be8,44151,1732041114739, archiveDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/oldWALs, maxLogs=10 2024-11-19T18:31:54,873 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C44151%2C1732041114739.1732041114873 2024-11-19T18:31:54,877 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/WALs/30db5f576be8,44151,1732041114739/30db5f576be8%2C44151%2C1732041114739.1732041114873 2024-11-19T18:31:54,878 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35375:35375),(127.0.0.1/127.0.0.1:43735:43735)] 2024-11-19T18:31:54,883 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:54,883 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:54,883 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,883 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:31:54,887 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:54,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:31:54,889 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:54,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:31:54,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:54,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:31:54,893 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:54,893 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,894 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,894 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,895 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,896 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,896 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:31:54,897 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:31:54,899 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:54,899 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873613, jitterRate=0.11085724830627441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:31:54,900 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732041114883Initializing all the Stores at 1732041114884 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041114884Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041114886 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041114886Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041114886Cleaning up temporary data from old regions at 1732041114896 (+10 ms)Region opened successfully at 1732041114900 (+4 ms) 2024-11-19T18:31:54,904 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:31:54,907 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768f0689, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:31:54,908 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:31:54,908 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:31:54,908 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:31:54,909 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:31:54,909 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:31:54,909 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:31:54,909 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:31:54,911 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:31:54,912 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:31:54,914 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:31:54,914 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:31:54,915 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:31:54,915 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:31:54,916 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:31:54,917 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:31:54,920 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:31:54,920 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:31:54,921 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:31:54,923 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:31:54,924 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:31:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:31:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,926 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,44151,1732041114739, sessionid=0x101317feef50000, setting cluster-up flag (Was=false) 2024-11-19T18:31:54,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,934 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:31:54,935 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:54,943 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:31:54,944 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,44151,1732041114739 2024-11-19T18:31:54,946 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:31:54,947 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:54,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:31:54,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:31:54,948 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,44151,1732041114739 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:31:54,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041144951 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:31:54,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:31:54,952 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:54,952 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:31:54,952 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:31:54,952 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:54,952 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:31:54,952 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:31:54,953 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041114953,5,FailOnTimeoutGroup] 2024-11-19T18:31:54,953 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041114953,5,FailOnTimeoutGroup] 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:54,953 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:54,954 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,954 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:31:54,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:31:54,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:31:54,961 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:31:54,961 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165 2024-11-19T18:31:54,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:31:54,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:31:54,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:54,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:31:54,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:31:54,972 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:54,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:31:54,973 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:31:54,973 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:54,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:31:54,975 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:31:54,975 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:54,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:31:54,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:31:54,976 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:54,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:54,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:31:54,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740 2024-11-19T18:31:54,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740 2024-11-19T18:31:54,979 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:31:54,979 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:31:54,979 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
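The hbase:meta descriptor logged above declares four column families (info, ns, rep_barrier and table) with VERSIONS, IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size. As a rough illustration of those attributes only, here is a minimal sketch of an equivalent family built through the public client builders; the table name is made up, and hbase:meta itself is created internally by the master, not by client code like this.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the attributes printed for the 'info' family in the log above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                              // VERSIONS => '3'
        .setInMemory(true)                              // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                         // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)           // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setCompressionType(Compression.Algorithm.NONE) // COMPRESSION => 'NONE'
        .build();
    // "ExampleTable" is illustrative only.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
        .setColumnFamily(info)
        .build();
  }
}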
2024-11-19T18:31:54,981 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:31:54,983 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:54,983 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783676, jitterRate=-0.003505215048789978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:31:54,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732041114969Initializing all the Stores at 1732041114970 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041114970Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041114970Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041114970Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041114970Cleaning up temporary data from old regions at 1732041114979 (+9 ms)Region opened successfully at 1732041114984 (+5 ms) 2024-11-19T18:31:54,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:31:54,984 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:31:54,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:31:54,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:31:54,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:31:54,985 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:31:54,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041114984Disabling compacts and flushes for region at 1732041114984Disabling writes for close at 1732041114984Writing region 
close event to WAL at 1732041114985 (+1 ms)Closed at 1732041114985 2024-11-19T18:31:54,986 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:54,986 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:31:54,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:31:54,988 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:31:54,990 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:31:55,001 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(746): ClusterId : f8f4bb5f-2eea-42a9-8dcf-c2ca562e287a 2024-11-19T18:31:55,001 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:31:55,003 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:31:55,003 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:31:55,005 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:31:55,006 DEBUG [RS:0;30db5f576be8:33931 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@590a5598, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:31:55,018 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:33931 2024-11-19T18:31:55,018 INFO [RS:0;30db5f576be8:33931 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:31:55,018 INFO [RS:0;30db5f576be8:33931 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:31:55,018 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T18:31:55,019 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,44151,1732041114739 with port=33931, startcode=1732041114788 2024-11-19T18:31:55,019 DEBUG [RS:0;30db5f576be8:33931 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:31:55,021 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57523, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:31:55,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,024 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165 2024-11-19T18:31:55,024 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33131 2024-11-19T18:31:55,024 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:31:55,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:31:55,027 DEBUG [RS:0;30db5f576be8:33931 {}] zookeeper.ZKUtil(111): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,027 WARN [RS:0;30db5f576be8:33931 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:31:55,027 INFO [RS:0;30db5f576be8:33931 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:55,027 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,028 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,33931,1732041114788] 2024-11-19T18:31:55,031 INFO [RS:0;30db5f576be8:33931 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:31:55,032 INFO [RS:0;30db5f576be8:33931 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:31:55,033 INFO [RS:0;30db5f576be8:33931 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:31:55,033 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
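The WALProvider instantiated above is FSHLogProvider. A brief sketch of how a provider is normally selected through configuration, assuming the commonly documented hbase.wal.provider key and its 'filesystem' / 'asyncfs' values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static Configuration filesystemWal() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" corresponds to FSHLogProvider, as instantiated in the log above;
    // "asyncfs" would select the asynchronous WAL provider instead.
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}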
2024-11-19T18:31:55,033 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:31:55,034 INFO [RS:0;30db5f576be8:33931 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:31:55,034 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,034 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:31:55,035 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:31:55,035 DEBUG [RS:0;30db5f576be8:33931 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,035 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,33931,1732041114788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:31:55,052 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:31:55,052 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,33931,1732041114788-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,052 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,052 INFO [RS:0;30db5f576be8:33931 {}] regionserver.Replication(171): 30db5f576be8,33931,1732041114788 started 2024-11-19T18:31:55,068 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,068 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,33931,1732041114788, RpcServer on 30db5f576be8/172.17.0.2:33931, sessionid=0x101317feef50001 2024-11-19T18:31:55,068 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:31:55,068 DEBUG [RS:0;30db5f576be8:33931 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,068 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,33931,1732041114788' 2024-11-19T18:31:55,068 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,33931,1732041114788' 2024-11-19T18:31:55,069 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:31:55,069 DEBUG 
[RS:0;30db5f576be8:33931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:31:55,070 DEBUG [RS:0;30db5f576be8:33931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:31:55,070 INFO [RS:0;30db5f576be8:33931 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:31:55,070 INFO [RS:0;30db5f576be8:33931 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:31:55,140 WARN [30db5f576be8:44151 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:31:55,172 INFO [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C33931%2C1732041114788, suffix=, logDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788, archiveDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs, maxLogs=32 2024-11-19T18:31:55,172 INFO [RS:0;30db5f576be8:33931 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C33931%2C1732041114788.1732041115172 2024-11-19T18:31:55,180 INFO [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041115172 2024-11-19T18:31:55,182 DEBUG [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35375:35375),(127.0.0.1/127.0.0.1:43735:43735)] 2024-11-19T18:31:55,390 DEBUG [30db5f576be8:44151 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:31:55,391 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,392 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,33931,1732041114788, state=OPENING 2024-11-19T18:31:55,393 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:31:55,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:55,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:31:55,395 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:55,395 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:55,395 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:31:55,395 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,33931,1732041114788}] 2024-11-19T18:31:55,548 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:31:55,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53149, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:31:55,554 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:31:55,554 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:31:55,556 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C33931%2C1732041114788.meta, suffix=.meta, logDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788, archiveDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs, maxLogs=32 2024-11-19T18:31:55,556 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C33931%2C1732041114788.meta.1732041115556.meta 2024-11-19T18:31:55,561 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.meta.1732041115556.meta 2024-11-19T18:31:55,562 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35375:35375),(127.0.0.1/127.0.0.1:43735:43735)] 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:31:55,563 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
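Both WAL instances above report blocksize=256 MB, rollsize=128 MB and maxLogs=32, with the roll size equal to half of the block size. A hedged sketch of the configuration a test might use to arrive at those numbers; the exact key names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) should be treated as assumptions here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingConfigSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // The roll size is derived from the WAL block size times the roll multiplier,
    // which lines up with "blocksize=256 MB, rollsize=128 MB" in the log above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}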
2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:31:55,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:31:55,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:31:55,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:31:55,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:55,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:55,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:31:55,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:31:55,566 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:55,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:55,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:31:55,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:31:55,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:55,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:31:55,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:31:55,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:31:55,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:55,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
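Every store opened above resolves its store file tracker to DefaultStoreFileTracker because the table metadata carries 'hbase.store.file-tracker.impl' => 'DEFAULT'. A small sketch of how that property can be pinned on a table descriptor; the 'demo' table name and the alternative FILE tracker value are illustrative assumptions:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSketch {
  // Passing "DEFAULT" reproduces what the log shows; other tracker names are assumptions.
  public static TableDescriptor withTracker(String impl) {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.store.file-tracker.impl", impl)
        .build();
  }
}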
2024-11-19T18:31:55,569 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:31:55,570 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740 2024-11-19T18:31:55,571 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740 2024-11-19T18:31:55,572 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:31:55,572 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:31:55,572 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:31:55,573 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:31:55,574 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795181, jitterRate=0.011126011610031128}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:31:55,574 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:31:55,575 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732041115563Writing region info on filesystem at 1732041115563Initializing all the Stores at 1732041115564 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041115564Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041115564Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041115564Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041115564Cleaning up temporary data from old regions at 1732041115572 (+8 ms)Running coprocessor post-open hooks at 1732041115574 (+2 ms)Region opened successfully at 1732041115575 (+1 ms) 2024-11-19T18:31:55,576 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732041115548 2024-11-19T18:31:55,578 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:31:55,578 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:31:55,579 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,579 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,33931,1732041114788, state=OPEN 2024-11-19T18:31:55,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:31:55,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:31:55,584 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:55,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:31:55,586 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:31:55,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,33931,1732041114788 in 189 msec 2024-11-19T18:31:55,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:31:55,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 600 msec 2024-11-19T18:31:55,590 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:31:55,590 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:31:55,591 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:31:55,591 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,33931,1732041114788, seqNum=-1] 2024-11-19T18:31:55,592 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:31:55,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57463, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:31:55,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-11-19T18:31:55,597 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732041115597, completionTime=-1 2024-11-19T18:31:55,597 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:31:55,597 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041175599 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041235599 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,599 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:44151, period=300000, unit=MILLISECONDS is enabled. 
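InitMetaProcedure reports above that it is about to create the 'default' and 'hbase' namespaces as part of master start-up. For comparison, a minimal sketch of what creating a namespace looks like through the public Admin API; the 'example_ns' name is illustrative, and the two built-in namespaces are created by the procedure itself rather than by a client:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Creates a user namespace; 'default' and 'hbase' already exist on a running cluster.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}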
2024-11-19T18:31:55,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,600 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,601 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.784sec 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:31:55,603 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:31:55,606 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:31:55,606 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:31:55,606 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,44151,1732041114739-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:31:55,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:55,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:55,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eab25ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:55,702 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,44151,-1 for getting cluster id 2024-11-19T18:31:55,702 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:31:55,703 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f8f4bb5f-2eea-42a9-8dcf-c2ca562e287a' 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f8f4bb5f-2eea-42a9-8dcf-c2ca562e287a" 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@369f8df5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,44151,-1] 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:31:55,704 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:31:55,706 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56210, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:31:55,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e483a61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:31:55,707 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:31:55,707 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,33931,1732041114788, seqNum=-1] 2024-11-19T18:31:55,708 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:31:55,709 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38540, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:31:55,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,44151,1732041114739 2024-11-19T18:31:55,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:31:55,713 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:31:55,713 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T18:31:55,714 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 30db5f576be8,44151,1732041114739 2024-11-19T18:31:55,714 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@539e25ee 2024-11-19T18:31:55,714 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T18:31:55,715 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T18:31:55,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T18:31:55,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T18:31:55,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:31:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-19T18:31:55,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T18:31:55,719 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:55,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-19T18:31:55,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T18:31:55,720 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T18:31:55,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741835_1011 (size=381) 2024-11-19T18:31:55,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741835_1011 (size=381) 2024-11-19T18:31:55,728 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 86f3d38b72b610f50be445126a88d870, NAME => 'TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165 2024-11-19T18:31:55,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741836_1012 (size=64) 2024-11-19T18:31:55,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741836_1012 (size=64) 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 86f3d38b72b610f50be445126a88d870, disabling compactions & flushes 2024-11-19T18:31:55,735 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. after waiting 0 ms 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:31:55,735 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:31:55,735 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 86f3d38b72b610f50be445126a88d870: Waiting for close lock at 1732041115735Disabling compacts and flushes for region at 1732041115735Disabling writes for close at 1732041115735Writing region close event to WAL at 1732041115735Closed at 1732041115735 2024-11-19T18:31:55,737 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T18:31:55,737 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732041115737"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732041115737"}]},"ts":"1732041115737"} 2024-11-19T18:31:55,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T18:31:55,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T18:31:55,740 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041115740"}]},"ts":"1732041115740"} 2024-11-19T18:31:55,742 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-19T18:31:55,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, ASSIGN}] 2024-11-19T18:31:55,744 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, ASSIGN 2024-11-19T18:31:55,745 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, ASSIGN; state=OFFLINE, location=30db5f576be8,33931,1732041114788; forceNewPlan=false, retain=false 2024-11-19T18:31:55,895 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=86f3d38b72b610f50be445126a88d870, regionState=OPENING, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:31:55,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, ASSIGN because future has completed 2024-11-19T18:31:55,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788}] 2024-11-19T18:31:56,055 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 
2024-11-19T18:31:56,055 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 86f3d38b72b610f50be445126a88d870, NAME => 'TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:31:56,055 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,055 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:31:56,055 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,055 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,057 INFO [StoreOpener-86f3d38b72b610f50be445126a88d870-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,058 INFO [StoreOpener-86f3d38b72b610f50be445126a88d870-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 86f3d38b72b610f50be445126a88d870 columnFamilyName info 2024-11-19T18:31:56,058 DEBUG [StoreOpener-86f3d38b72b610f50be445126a88d870-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:31:56,059 INFO [StoreOpener-86f3d38b72b610f50be445126a88d870-1 {}] regionserver.HStore(327): Store=86f3d38b72b610f50be445126a88d870/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:31:56,059 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,060 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,060 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,060 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,060 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,062 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,064 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:31:56,064 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 86f3d38b72b610f50be445126a88d870; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719134, jitterRate=-0.08557386696338654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:31:56,064 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 86f3d38b72b610f50be445126a88d870 2024-11-19T18:31:56,065 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 86f3d38b72b610f50be445126a88d870: Running coprocessor pre-open hook at 1732041116056Writing region info on filesystem at 1732041116056Initializing all the Stores at 1732041116056Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041116056Cleaning up temporary data from old regions at 1732041116060 (+4 ms)Running coprocessor post-open hooks at 1732041116064 (+4 ms)Region opened successfully at 1732041116065 (+1 ms) 2024-11-19T18:31:56,066 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., pid=6, masterSystemTime=1732041116051 2024-11-19T18:31:56,068 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 
2024-11-19T18:31:56,068 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:31:56,069 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=86f3d38b72b610f50be445126a88d870, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:31:56,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788 because future has completed 2024-11-19T18:31:56,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T18:31:56,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788 in 175 msec 2024-11-19T18:31:56,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T18:31:56,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, ASSIGN in 332 msec 2024-11-19T18:31:56,078 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T18:31:56,078 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732041116078"}]},"ts":"1732041116078"} 2024-11-19T18:31:56,081 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-19T18:31:56,082 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T18:31:56,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 366 msec 2024-11-19T18:31:56,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:56,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:57,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:57,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:58,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:58,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:58,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:59,087 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:31:59,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:31:59,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:31:59,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:00,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:00,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:01,031 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T18:32:01,031 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-19T18:32:01,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:01,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:01,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-19T18:32:01,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T18:32:01,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T18:32:02,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T18:32:02,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:32:03,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:32:03,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:32:04,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:32:04,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:32:05,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
2024-11-19T18:32:05,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
2024-11-19T18:32:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-19T18:32:05,782 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-19T18:32:05,782 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-19T18:32:05,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-19T18:32:05,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.
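The repeated Close-WAL-Writer-0 warnings above all have the same shape: lease recovery keeps probing whether the old WAL file is closed by invoking DistributedFileSystem#isFileClosed through reflection, and because the mini-cluster's DFSClient has already been shut down, every probe fails with IOException("Filesystem closed") wrapped in an InvocationTargetException, roughly once per second. The sketch below illustrates that kind of reflective probe-and-retry loop; the class name, method shape, and retry policy are assumptions for illustration, not the actual RecoverLeaseFSUtils code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {

  // Polls the filesystem until it reports the file closed, retrying about once
  // per second (the same cadence as the WARN timestamps in this log).
  public static boolean waitUntilClosed(FileSystem fs, Path wal, int maxAttempts)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      // DistributedFileSystem exposes isFileClosed(Path); resolving it
      // reflectively keeps the caller loadable against clients that lack it.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation has no such probe
    }
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true; // the NameNode considers the file closed
        }
      } catch (IllegalAccessException e) {
        return false;
      } catch (InvocationTargetException e) {
        // A DFSClient that was already shut down throws
        // IOException("Filesystem closed"); the reflective call wraps it in an
        // InvocationTargetException, which is exactly what the WARN lines show.
        // Keep retrying and let the caller decide when to give up.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}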
2024-11-19T18:32:05,788 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., hostname=30db5f576be8,33931,1732041114788, seqNum=2] 2024-11-19T18:32:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:05,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:32:05,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/d91dcfb69feb4b22b33de815a6351d7d is 1080, key is row0001/info:/1732041125789/Put/seqid=0 2024-11-19T18:32:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741837_1013 (size=12509) 2024-11-19T18:32:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741837_1013 (size=12509) 2024-11-19T18:32:05,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/d91dcfb69feb4b22b33de815a6351d7d 2024-11-19T18:32:05,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/d91dcfb69feb4b22b33de815a6351d7d as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d 2024-11-19T18:32:05,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T18:32:05,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 86f3d38b72b610f50be445126a88d870 in 36ms, sequenceid=11, compaction requested=false 2024-11-19T18:32:05,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:05,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-19T18:32:05,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/fd0ff2fabf3640eaa5664803ec0da70c is 1080, key is row0008/info:/1732041125800/Put/seqid=0 2024-11-19T18:32:05,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741838_1014 (size=29761) 2024-11-19T18:32:05,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741838_1014 (size=29761) 2024-11-19T18:32:05,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/fd0ff2fabf3640eaa5664803ec0da70c 2024-11-19T18:32:05,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/fd0ff2fabf3640eaa5664803ec0da70c as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c 2024-11-19T18:32:05,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c, entries=23, sequenceid=37, filesize=29.1 K 2024-11-19T18:32:05,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 86f3d38b72b610f50be445126a88d870 in 20ms, sequenceid=37, compaction requested=false 2024-11-19T18:32:05,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:05,857 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-19T18:32:05,857 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:05,857 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c because midkey is the same as first or last row 2024-11-19T18:32:06,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:06,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:07,363 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:32:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:07,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:07,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:07,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:07,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:32:07,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/81778e2c7afb4596835c8d2b7471792f is 1080, key is row0031/info:/1732041125838/Put/seqid=0 2024-11-19T18:32:07,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741839_1015 (size=12509) 2024-11-19T18:32:07,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741839_1015 (size=12509) 2024-11-19T18:32:07,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/81778e2c7afb4596835c8d2b7471792f 2024-11-19T18:32:07,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/81778e2c7afb4596835c8d2b7471792f as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f 2024-11-19T18:32:07,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f, entries=7, sequenceid=47, filesize=12.2 K 2024-11-19T18:32:07,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 86f3d38b72b610f50be445126a88d870 in 25ms, sequenceid=47, compaction requested=true 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c because midkey is the same as first or last row 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 86f3d38b72b610f50be445126a88d870:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-19T18:32:07,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:07,875 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:32:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:07,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-19T18:32:07,876 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:32:07,876 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 86f3d38b72b610f50be445126a88d870/info is initiating minor compaction (all files) 2024-11-19T18:32:07,877 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 86f3d38b72b610f50be445126a88d870/info in TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:07,877 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp, totalSize=53.5 K 2024-11-19T18:32:07,877 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting d91dcfb69feb4b22b33de815a6351d7d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732041125789 2024-11-19T18:32:07,878 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting fd0ff2fabf3640eaa5664803ec0da70c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732041125800 2024-11-19T18:32:07,878 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81778e2c7afb4596835c8d2b7471792f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732041125838 2024-11-19T18:32:07,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/28e6d1feae144d47b9016c803fb02919 is 1080, key is row0038/info:/1732041127850/Put/seqid=0 
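At this point the store holds the three hfiles flushed earlier (12509 + 29761 + 12509 = 54779 bytes), and the exploring policy accepts all of them as one minor compaction because no single file is outsized relative to the rest. The toy ratio test below reproduces that decision with the sizes from this log; the class name and the 1.2 ratio (the usual default) are illustrative assumptions, not the real ExploringCompactionPolicy implementation.

import java.util.List;

public class CompactionRatioCheck {

  // Returns true when every candidate file is at most `ratio` times the sum of
  // the other candidates, i.e. no single file dwarfs the rest.
  public static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = 0L;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three flushed files from this log: 12509 + 29761 + 12509 = 54779 bytes.
    List<Long> sizes = List.of(12509L, 29761L, 12509L);
    // 29761 <= 1.2 * (12509 + 12509) = 30021.6, so the whole set is "in ratio"
    // and all three files go into one minor compaction.
    System.out.println(filesInRatio(sizes, 1.2)); // prints true
  }
}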
2024-11-19T18:32:07,892 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 86f3d38b72b610f50be445126a88d870#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:07,892 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/e807a8895cfd442cad8f191adf417c82 is 1080, key is row0001/info:/1732041125789/Put/seqid=0 2024-11-19T18:32:07,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741840_1016 (size=23299) 2024-11-19T18:32:07,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741841_1017 (size=44978) 2024-11-19T18:32:07,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741841_1017 (size=44978) 2024-11-19T18:32:07,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741840_1016 (size=23299) 2024-11-19T18:32:07,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/28e6d1feae144d47b9016c803fb02919 2024-11-19T18:32:07,903 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/e807a8895cfd442cad8f191adf417c82 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 2024-11-19T18:32:07,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/28e6d1feae144d47b9016c803fb02919 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919 2024-11-19T18:32:07,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919, entries=17, sequenceid=67, filesize=22.8 K 2024-11-19T18:32:07,909 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 86f3d38b72b610f50be445126a88d870/info of 86f3d38b72b610f50be445126a88d870 into e807a8895cfd442cad8f191adf417c82(size=43.9 K), total size for store is 66.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
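Both the flush and the compaction finish with the same "Committing ... as ..." step: the new hfile is written under the region's .tmp directory and only moved into the info store directory once it is complete, so readers never observe a half-written file. The sketch below shows that commit-by-rename pattern in isolation; the helper name and example paths are made up for illustration and are not the HRegionFileSystem API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFile {

  // Moves a finished hfile from the region's .tmp area into the store
  // directory. A rename within one HDFS filesystem is atomic, so readers only
  // ever see complete files.
  public static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dest = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws IOException {
    // Paths modelled on the log above (region 86f3d38b..., family "info"); they
    // must exist on the configured filesystem for the rename to succeed.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/TestLogRolling-testLogRolling/"
        + "86f3d38b72b610f50be445126a88d870/.tmp/info/28e6d1feae144d47b9016c803fb02919");
    Path storeDir = new Path("/data/default/TestLogRolling-testLogRolling/"
        + "86f3d38b72b610f50be445126a88d870/info");
    System.out.println("Committed to " + commit(fs, tmp, storeDir));
  }
}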
2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:07,909 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., storeName=86f3d38b72b610f50be445126a88d870/info, priority=13, startTime=1732041127875; duration=0sec 2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:07,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for 86f3d38b72b610f50be445126a88d870 in 33ms, sequenceid=67, compaction requested=false 2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:07,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-19T18:32:07,909 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-19T18:32:07,909 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:07,909 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:07,910 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:07,910 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 86f3d38b72b610f50be445126a88d870:info 2024-11-19T18:32:08,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:08,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:09,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:09,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:09,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:09,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T18:32:09,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/101834b9426e457f971cb496ed52eb40 is 1080, key is row0055/info:/1732041127877/Put/seqid=0 2024-11-19T18:32:09,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741842_1018 (size=16817) 2024-11-19T18:32:09,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741842_1018 (size=16817) 2024-11-19T18:32:09,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/101834b9426e457f971cb496ed52eb40 2024-11-19T18:32:09,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/101834b9426e457f971cb496ed52eb40 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40 2024-11-19T18:32:09,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40, entries=11, sequenceid=82, filesize=16.4 K 2024-11-19T18:32:09,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 86f3d38b72b610f50be445126a88d870 in 23ms, sequenceid=82, compaction requested=true 2024-11-19T18:32:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:09,917 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,918 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,918 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:09,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 86f3d38b72b610f50be445126a88d870:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-19T18:32:09,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:09,918 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:32:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:09,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T18:32:09,919 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:32:09,919 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 86f3d38b72b610f50be445126a88d870/info is initiating minor compaction (all files) 2024-11-19T18:32:09,919 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 86f3d38b72b610f50be445126a88d870/info in TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:09,919 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp, totalSize=83.1 K 2024-11-19T18:32:09,920 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting e807a8895cfd442cad8f191adf417c82, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732041125789 2024-11-19T18:32:09,920 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28e6d1feae144d47b9016c803fb02919, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=67, earliestPutTs=1732041127850 2024-11-19T18:32:09,921 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 101834b9426e457f971cb496ed52eb40, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732041127877 2024-11-19T18:32:09,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/891d4b1623c249b58b05408186d1bfb0 is 1080, key is 
row0066/info:/1732041129896/Put/seqid=0 2024-11-19T18:32:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741843_1019 (size=21141) 2024-11-19T18:32:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741843_1019 (size=21141) 2024-11-19T18:32:09,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/891d4b1623c249b58b05408186d1bfb0 2024-11-19T18:32:09,934 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 86f3d38b72b610f50be445126a88d870#info#compaction#62 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:09,934 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/2697ac83ab1d48c1af4df311187a62ee is 1080, key is row0001/info:/1732041125789/Put/seqid=0 2024-11-19T18:32:09,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/891d4b1623c249b58b05408186d1bfb0 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/891d4b1623c249b58b05408186d1bfb0 2024-11-19T18:32:09,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/891d4b1623c249b58b05408186d1bfb0, entries=15, sequenceid=100, filesize=20.6 K 2024-11-19T18:32:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741844_1020 (size=75378) 2024-11-19T18:32:09,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 86f3d38b72b610f50be445126a88d870 in 24ms, sequenceid=100, compaction requested=false 2024-11-19T18:32:09,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:09,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741844_1020 (size=75378) 2024-11-19T18:32:09,943 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.7 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,943 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,943 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 because midkey is the same as first or last row 2024-11-19T18:32:09,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:09,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T18:32:09,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/af46b3d584e14931a88ad6eee1670033 is 1080, key is row0081/info:/1732041129919/Put/seqid=0 2024-11-19T18:32:09,949 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/2697ac83ab1d48c1af4df311187a62ee as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee 2024-11-19T18:32:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741845_1021 (size=20064) 2024-11-19T18:32:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741845_1021 (size=20064) 2024-11-19T18:32:09,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/af46b3d584e14931a88ad6eee1670033 2024-11-19T18:32:09,955 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 86f3d38b72b610f50be445126a88d870/info of 86f3d38b72b610f50be445126a88d870 into 2697ac83ab1d48c1af4df311187a62ee(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
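The repeated "Failed invocation for hdfs://...WALs/..." warnings above (roughly once per second for the same two WAL files) come from the WAL close path probing whether HDFS already considers the file closed; the probe is made reflectively, which is why the real failure, java.io.IOException: Filesystem closed from an already shut-down DFSClient, surfaces wrapped in an InvocationTargetException. A minimal sketch of such a reflective probe, assuming hadoop-common on the classpath; the class and method below are illustrative, not the RecoverLeaseFSUtils implementation:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Best-effort probe: true only if the filesystem positively reports the file closed. */
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed is not on the FileSystem base class, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // filesystem implementation without an isFileClosed method
    } catch (InvocationTargetException e) {
      // The real failure is the cause, e.g. java.io.IOException: Filesystem closed
      // when the DFSClient behind this FileSystem has already been shut down.
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}

Unwrapping getCause() is what turns the opaque "InvocationTargetException: null" at the top of each trace into the actionable "Filesystem closed" at the bottom.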
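Each flush above is followed by a split-policy check ("Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K", "regionsWithCommonTable=1"). A rough sketch of that check, under the assumption that sizeToCheck is min(desiredMaxFileSize, initialSize * regionCount^3), which matches the numbers printed here and in the policy toString that appears later in this log (initialSize=16384, desiredMaxFileSize=689809); the class and method names are illustrative, not the HBase split-policy source:

final class SplitSizeCheckSketch {
  // sizeToCheck grows with the cube of the number of this table's regions on the server,
  // capped at the desired maximum file size.
  static long sizeToCheck(long desiredMaxFileSize, long initialSize, int regionsWithCommonTable) {
    if (regionsWithCommonTable == 0 || regionsWithCommonTable > 100) {
      return desiredMaxFileSize;
    }
    long cubed = initialSize * regionsWithCommonTable * regionsWithCommonTable
        * regionsWithCommonTable;
    return Math.min(desiredMaxFileSize, cubed);
  }

  public static void main(String[] args) {
    // Values visible in this log: initialSize=16384 and desiredMaxFileSize=689809 come from
    // the policy toString printed when the daughter region opens; one region of the table.
    System.out.println(sizeToCheck(689_809L, 16_384L, 1)); // 16384, i.e. the "sizeToCheck=16.0 K" above
  }
}

Passing the size check is not enough by itself: the "cannot split ... because midkey is the same as first or last row" lines show the attempt is abandoned when the largest store file offers no usable midkey, and a split request (splitKey=row0062) only goes through once the three small files have been compacted into 2697ac83ab1d48c1af4df311187a62ee.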
2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:09,955 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., storeName=86f3d38b72b610f50be445126a88d870/info, priority=13, startTime=1732041129918; duration=0sec 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,955 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,956 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:09,956 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:09,956 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 86f3d38b72b610f50be445126a88d870:info 2024-11-19T18:32:09,958 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] assignment.AssignmentManager(1355): Split request from 30db5f576be8,33931,1732041114788, parent={ENCODED => 86f3d38b72b610f50be445126a88d870, NAME => 'TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T18:32:09,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/af46b3d584e14931a88ad6eee1670033 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/af46b3d584e14931a88ad6eee1670033 2024-11-19T18:32:09,963 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=30db5f576be8,33931,1732041114788 2024-11-19T18:32:09,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/af46b3d584e14931a88ad6eee1670033, entries=14, sequenceid=117, filesize=19.6 K 2024-11-19T18:32:09,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for 86f3d38b72b610f50be445126a88d870 in 20ms, sequenceid=117, compaction requested=true 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 86f3d38b72b610f50be445126a88d870: 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T18:32:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-19T18:32:09,967 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=86f3d38b72b610f50be445126a88d870, daughterA=32b10d34e9cd1a30469e17eeed3e0b1e, daughterB=63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:09,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=86f3d38b72b610f50be445126a88d870, daughterA=32b10d34e9cd1a30469e17eeed3e0b1e, daughterB=63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:09,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=86f3d38b72b610f50be445126a88d870, daughterA=32b10d34e9cd1a30469e17eeed3e0b1e, daughterB=63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:09,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=86f3d38b72b610f50be445126a88d870, daughterA=32b10d34e9cd1a30469e17eeed3e0b1e, daughterB=63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:09,969 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] assignment.AssignmentManager(1355): Split request from 30db5f576be8,33931,1732041114788, parent={ENCODED => 
86f3d38b72b610f50be445126a88d870, NAME => 'TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T18:32:09,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44151 {}] assignment.AssignmentManager(1370): Ignoring split request from 30db5f576be8,33931,1732041114788, parent={ENCODED => 86f3d38b72b610f50be445126a88d870, NAME => 'TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-19T18:32:09,975 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, UNASSIGN}] 2024-11-19T18:32:09,976 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, UNASSIGN 2024-11-19T18:32:09,977 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=86f3d38b72b610f50be445126a88d870, regionState=CLOSING, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:32:09,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, UNASSIGN because future has completed 2024-11-19T18:32:09,980 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-19T18:32:09,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788}] 2024-11-19T18:32:10,137 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,137 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-19T18:32:10,137 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 86f3d38b72b610f50be445126a88d870, disabling compactions & flushes 2024-11-19T18:32:10,137 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:10,137 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:10,137 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 
after waiting 0 ms 2024-11-19T18:32:10,137 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:10,137 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 86f3d38b72b610f50be445126a88d870 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-19T18:32:10,142 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/f19fde26f8034f8fbb2ebeb3cb89466e is 1080, key is row0095/info:/1732041129945/Put/seqid=0 2024-11-19T18:32:10,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741846_1022 (size=7112) 2024-11-19T18:32:10,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741846_1022 (size=7112) 2024-11-19T18:32:10,147 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/f19fde26f8034f8fbb2ebeb3cb89466e 2024-11-19T18:32:10,152 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/.tmp/info/f19fde26f8034f8fbb2ebeb3cb89466e as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/f19fde26f8034f8fbb2ebeb3cb89466e 2024-11-19T18:32:10,157 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/f19fde26f8034f8fbb2ebeb3cb89466e, entries=2, sequenceid=123, filesize=6.9 K 2024-11-19T18:32:10,157 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 86f3d38b72b610f50be445126a88d870 in 20ms, sequenceid=123, compaction requested=true 2024-11-19T18:32:10,158 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d, 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40] to archive 2024-11-19T18:32:10,159 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T18:32:10,161 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d 2024-11-19T18:32:10,162 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/fd0ff2fabf3640eaa5664803ec0da70c 2024-11-19T18:32:10,163 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/e807a8895cfd442cad8f191adf417c82 2024-11-19T18:32:10,164 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/81778e2c7afb4596835c8d2b7471792f 
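The HFileArchiver lines above move each compacted store file from the region's data directory to a mirror location under archive/, keeping the same table/region/family layout. A small sketch of that path mapping for the path portion of the hdfs:// URIs shown; the class and method names are illustrative, not the HFileArchiver API:

import java.nio.file.Path;
import java.nio.file.Paths;

public final class ArchivePathSketch {
  // <root>/data/<ns>/<table>/<region>/<cf>/<hfile> -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>
  static Path toArchivePath(Path rootDir, Path storeFile) {
    Path relativeToData = rootDir.resolve("data").relativize(storeFile);
    return rootDir.resolve("archive").resolve("data").resolve(relativeToData);
  }

  public static void main(String[] args) {
    Path root = Paths.get("/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165");
    Path src = root.resolve("data/default/TestLogRolling-testLogRolling/"
        + "86f3d38b72b610f50be445126a88d870/info/d91dcfb69feb4b22b33de815a6351d7d");
    // Prints the same archive destination as the first "Archived from FileableStoreFile" line above.
    System.out.println(toArchivePath(root, src));
  }
}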
2024-11-19T18:32:10,165 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/28e6d1feae144d47b9016c803fb02919 2024-11-19T18:32:10,166 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/101834b9426e457f971cb496ed52eb40 2024-11-19T18:32:10,172 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-19T18:32:10,173 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 2024-11-19T18:32:10,173 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 86f3d38b72b610f50be445126a88d870: Waiting for close lock at 1732041130137Running coprocessor pre-close hooks at 1732041130137Disabling compacts and flushes for region at 1732041130137Disabling writes for close at 1732041130137Obtaining lock to block concurrent updates at 1732041130138 (+1 ms)Preparing flush snapshotting stores in 86f3d38b72b610f50be445126a88d870 at 1732041130138Finished memstore snapshotting TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1732041130138Flushing stores of TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 
at 1732041130139 (+1 ms)Flushing 86f3d38b72b610f50be445126a88d870/info: creating writer at 1732041130139Flushing 86f3d38b72b610f50be445126a88d870/info: appending metadata at 1732041130141 (+2 ms)Flushing 86f3d38b72b610f50be445126a88d870/info: closing flushed file at 1732041130141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cff5f0e: reopening flushed file at 1732041130152 (+11 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 86f3d38b72b610f50be445126a88d870 in 20ms, sequenceid=123, compaction requested=true at 1732041130157 (+5 ms)Writing region close event to WAL at 1732041130168 (+11 ms)Running coprocessor post-close hooks at 1732041130173 (+5 ms)Closed at 1732041130173 2024-11-19T18:32:10,175 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,175 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=86f3d38b72b610f50be445126a88d870, regionState=CLOSED 2024-11-19T18:32:10,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788 because future has completed 2024-11-19T18:32:10,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-19T18:32:10,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 86f3d38b72b610f50be445126a88d870, server=30db5f576be8,33931,1732041114788 in 198 msec 2024-11-19T18:32:10,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T18:32:10,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=86f3d38b72b610f50be445126a88d870, UNASSIGN in 205 msec 2024-11-19T18:32:10,189 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:10,193 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=86f3d38b72b610f50be445126a88d870, threads=4 2024-11-19T18:32:10,195 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/891d4b1623c249b58b05408186d1bfb0 for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,195 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/f19fde26f8034f8fbb2ebeb3cb89466e for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,195 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/af46b3d584e14931a88ad6eee1670033 for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,195 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,207 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/af46b3d584e14931a88ad6eee1670033, top=true 2024-11-19T18:32:10,207 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/f19fde26f8034f8fbb2ebeb3cb89466e, top=true 2024-11-19T18:32:10,207 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/891d4b1623c249b58b05408186d1bfb0, top=true 2024-11-19T18:32:10,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741847_1023 (size=27) 2024-11-19T18:32:10,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741847_1023 (size=27) 2024-11-19T18:32:10,215 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e for child: 63180f0cb91b87611df480c0f7ab38fd, parent: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,215 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033 for child: 63180f0cb91b87611df480c0f7ab38fd, parent: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,215 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0 for child: 63180f0cb91b87611df480c0f7ab38fd, parent: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,215 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/f19fde26f8034f8fbb2ebeb3cb89466e for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,215 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/891d4b1623c249b58b05408186d1bfb0 for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,215 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/af46b3d584e14931a88ad6eee1670033 for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741848_1024 (size=27) 2024-11-19T18:32:10,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741848_1024 (size=27) 2024-11-19T18:32:10,224 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee for region: 86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:10,226 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 86f3d38b72b610f50be445126a88d870 Daughter A: [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870] storefiles, Daughter B: [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e] storefiles. 
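The Daughter A / Daughter B store-file listing above reflects a per-file decision made during the split: a file whose keys all sort at or after splitKey=row0062 becomes an HFileLink in the top daughter only ("Will create HFileLink file for ..., top=true"), while the one file that straddles the split key (2697ac83..., starting at row0001) is referenced from both daughters as bottom/top half references. A hedged sketch of that decision; the class, enum, and the row ranges marked approximate are illustrative, not HBase code:

import java.nio.charset.StandardCharsets;
import java.util.Comparator;

final class SplitPlacementSketch {
  enum Placement { LINK_IN_TOP_DAUGHTER, LINK_IN_BOTTOM_DAUGHTER, HALF_REFERENCES_IN_BOTH }

  static Placement place(byte[] firstKey, byte[] lastKey, byte[] splitKey, Comparator<byte[]> cmp) {
    if (cmp.compare(firstKey, splitKey) >= 0) {
      return Placement.LINK_IN_TOP_DAUGHTER;    // whole file sorts at or after the split key
    }
    if (cmp.compare(lastKey, splitKey) < 0) {
      return Placement.LINK_IN_BOTTOM_DAUGHTER; // whole file sorts before the split key
    }
    return Placement.HALF_REFERENCES_IN_BOTH;   // file straddles the split key
  }

  static byte[] k(String row) { return row.getBytes(StandardCharsets.UTF_8); }

  public static void main(String[] args) {
    Comparator<byte[]> byRow = Comparator.comparing(b -> new String(b, StandardCharsets.UTF_8));
    // 891d4b1623c2... starts at row0066 (see its flush line above), so with splitKey=row0062
    // it lands entirely in the top daughter as an HFileLink (last row here is approximate):
    System.out.println(place(k("row0066"), k("row0080"), k("row0062"), byRow));
    // 2697ac83ab1d... starts at row0001, so it straddles row0062 and is referenced from both
    // daughters (last row here is approximate):
    System.out.println(place(k("row0001"), k("row0065"), k("row0062"), byRow));
  }
}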
2024-11-19T18:32:10,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741849_1025 (size=71) 2024-11-19T18:32:10,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741849_1025 (size=71) 2024-11-19T18:32:10,235 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:10,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741850_1026 (size=71) 2024-11-19T18:32:10,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741850_1026 (size=71) 2024-11-19T18:32:10,248 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:10,258 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-19T18:32:10,260 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-19T18:32:10,262 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732041130262"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732041130262"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732041130262"}]},"ts":"1732041130262"} 2024-11-19T18:32:10,262 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732041130262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732041130262"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732041130262"}]},"ts":"1732041130262"} 2024-11-19T18:32:10,262 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732041130262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732041130262"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732041130262"}]},"ts":"1732041130262"} 2024-11-19T18:32:10,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=32b10d34e9cd1a30469e17eeed3e0b1e, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=63180f0cb91b87611df480c0f7ab38fd, ASSIGN}] 2024-11-19T18:32:10,280 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=63180f0cb91b87611df480c0f7ab38fd, ASSIGN 2024-11-19T18:32:10,281 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=32b10d34e9cd1a30469e17eeed3e0b1e, ASSIGN 2024-11-19T18:32:10,281 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=63180f0cb91b87611df480c0f7ab38fd, ASSIGN; state=SPLITTING_NEW, location=30db5f576be8,33931,1732041114788; forceNewPlan=false, retain=false 2024-11-19T18:32:10,281 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=32b10d34e9cd1a30469e17eeed3e0b1e, ASSIGN; state=SPLITTING_NEW, location=30db5f576be8,33931,1732041114788; forceNewPlan=false, retain=false 2024-11-19T18:32:10,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=32b10d34e9cd1a30469e17eeed3e0b1e, regionState=OPENING, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:32:10,432 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=63180f0cb91b87611df480c0f7ab38fd, regionState=OPENING, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:32:10,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=32b10d34e9cd1a30469e17eeed3e0b1e, ASSIGN because future has completed 2024-11-19T18:32:10,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32b10d34e9cd1a30469e17eeed3e0b1e, server=30db5f576be8,33931,1732041114788}] 2024-11-19T18:32:10,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=63180f0cb91b87611df480c0f7ab38fd, ASSIGN because future has completed 2024-11-19T18:32:10,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788}] 2024-11-19T18:32:10,590 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 
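The three RegionStateStore Put lines above describe how the split is recorded in hbase:meta: the parent row gains splitA/splitB qualifiers, while each daughter row gets regioninfo, state, and seqnumDuringOpen. One way to look at those rows from a client, assuming a reachable cluster and the info column family shown in the log; this is a generic client-API sketch, not part of the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaRowsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          String row = Bytes.toStringBinary(r.getRow());
          if (!row.startsWith("TestLogRolling-testLogRolling,")) continue;
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          byte[] splitA = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("splitA"));
          // The state qualifier holds a name such as OPEN or CLOSED; splitA is only
          // present on the parent row once the split has been recorded.
          System.out.println(row
              + " state=" + (state == null ? "-" : Bytes.toString(state))
              + " hasSplitA=" + (splitA != null));
        }
      }
    }
  }
}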
2024-11-19T18:32:10,591 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 32b10d34e9cd1a30469e17eeed3e0b1e, NAME => 'TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-19T18:32:10,591 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,591 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:10,591 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,591 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,592 INFO [StoreOpener-32b10d34e9cd1a30469e17eeed3e0b1e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,593 INFO [StoreOpener-32b10d34e9cd1a30469e17eeed3e0b1e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32b10d34e9cd1a30469e17eeed3e0b1e columnFamilyName info 2024-11-19T18:32:10,593 DEBUG [StoreOpener-32b10d34e9cd1a30469e17eeed3e0b1e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:10,603 DEBUG [StoreOpener-32b10d34e9cd1a30469e17eeed3e0b1e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-bottom 2024-11-19T18:32:10,603 INFO [StoreOpener-32b10d34e9cd1a30469e17eeed3e0b1e-1 {}] regionserver.HStore(327): Store=32b10d34e9cd1a30469e17eeed3e0b1e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:32:10,604 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,604 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,605 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,606 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,606 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,607 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,608 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 32b10d34e9cd1a30469e17eeed3e0b1e; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689809, jitterRate=-0.12286293506622314}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:32:10,608 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:10,609 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 32b10d34e9cd1a30469e17eeed3e0b1e: Running coprocessor pre-open hook at 1732041130591Writing region info on filesystem at 1732041130591Initializing all the Stores at 1732041130592 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041130592Cleaning up temporary data from old regions at 1732041130606 (+14 ms)Running coprocessor post-open hooks at 1732041130608 (+2 ms)Region opened successfully at 1732041130608 2024-11-19T18:32:10,609 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e., pid=12, masterSystemTime=1732041130587 2024-11-19T18:32:10,610 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
32b10d34e9cd1a30469e17eeed3e0b1e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:32:10,610 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:10,610 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T18:32:10,610 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:10,610 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 32b10d34e9cd1a30469e17eeed3e0b1e/info is initiating minor compaction (all files) 2024-11-19T18:32:10,610 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 32b10d34e9cd1a30469e17eeed3e0b1e/info in TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:10,610 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-bottom] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/.tmp, totalSize=73.6 K 2024-11-19T18:32:10,611 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732041125789 2024-11-19T18:32:10,612 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:10,612 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:10,612 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 
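The StoreEngine "loaded ... -> ... -bottom" entry above shows the freshly split daughter opening with a reference file rather than a real HFile: the name "2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870" combines the parent's store file with the parent region's encoded name, and the "-bottom" suffix marks the half of the parent's key range this daughter (STARTKEY '' to ENDKEY 'row0062') is allowed to read. A minimal sketch of that naming convention follows; it is an illustration only, not HBase code.

```java
// Illustration of the "<parentHFile>.<parentRegionEncodedName>" reference-file
// naming visible in the StoreEngine "loaded ..." line above. The "-bottom" /
// "-top" suffix in the log indicates which half of the parent's key range the
// daughter may read (bottom = lower keys). Not HBase code; the string is taken
// from the log for demonstration.
public class ReferenceNameSketch {
  public static void main(String[] args) {
    String name = "2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870";
    int dot = name.indexOf('.');
    String parentHFile = name.substring(0, dot);
    String parentRegion = name.substring(dot + 1);
    System.out.println("parent hfile  = " + parentHFile);   // the original store file
    System.out.println("parent region = " + parentRegion);  // encoded name of the pre-split region
  }
}
```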
2024-11-19T18:32:10,612 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 63180f0cb91b87611df480c0f7ab38fd, NAME => 'TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-19T18:32:10,612 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,612 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:10,612 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=32b10d34e9cd1a30469e17eeed3e0b1e, regionState=OPEN, openSeqNum=127, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:32:10,613 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,613 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,614 INFO [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,614 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-19T18:32:10,614 INFO [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 63180f0cb91b87611df480c0f7ab38fd columnFamilyName info 2024-11-19T18:32:10,614 DEBUG [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:10,614 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
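The CompactionConfiguration entries above report ratio 1.200000 with minFilesToCompact:3 and maxFilesToCompact:10. For routine minor compactions the ratio is, roughly, a dominance test: a candidate set is acceptable only when no single file is larger than the combined size of the other files times the ratio. The compactions in this log are forced "all files" compactions of freshly split daughters, so the test matters less here. A simplified sketch of that check follows, with invented file sizes; it is not the actual ExploringCompactionPolicy code.

```java
import java.util.List;

// Simplified illustration of the ratio test used by size-based compaction
// selection (ratio 1.2 in the CompactionConfiguration lines above): every file
// must be no larger than the combined size of the other files times the ratio,
// otherwise the candidate set is rejected. File sizes are invented.
public class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the selection; skip this set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the mix seen later in this log: one ~73.6 K file plus three small ones.
    System.out.println(filesInRatio(List.of(75_366L, 21_094L, 20_070L, 7_065L), 1.2)); // false
    // Four similarly sized files pass the test.
    System.out.println(filesInRatio(List.of(20_000L, 21_000L, 19_500L, 22_000L), 1.2)); // true
  }
}
```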
2024-11-19T18:32:10,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-19T18:32:10,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32b10d34e9cd1a30469e17eeed3e0b1e, server=30db5f576be8,33931,1732041114788 because future has completed 2024-11-19T18:32:10,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-19T18:32:10,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 32b10d34e9cd1a30469e17eeed3e0b1e, server=30db5f576be8,33931,1732041114788 in 181 msec 2024-11-19T18:32:10,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=32b10d34e9cd1a30469e17eeed3e0b1e, ASSIGN in 340 msec 2024-11-19T18:32:10,630 DEBUG [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-top 2024-11-19T18:32:10,631 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 32b10d34e9cd1a30469e17eeed3e0b1e#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:10,632 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/.tmp/info/32fb8658964849db8f6e9582229e8a69 is 1080, key is row0001/info:/1732041125789/Put/seqid=0 2024-11-19T18:32:10,635 DEBUG [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0 2024-11-19T18:32:10,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741851_1027 (size=70862) 2024-11-19T18:32:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741851_1027 (size=70862) 2024-11-19T18:32:10,639 DEBUG [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033 2024-11-19T18:32:10,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/2c67a0a0eaa443f5beec054ec2459a8d is 193, key is TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd./info:regioninfo/1732041130432/Put/seqid=0 2024-11-19T18:32:10,643 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/.tmp/info/32fb8658964849db8f6e9582229e8a69 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/32fb8658964849db8f6e9582229e8a69 2024-11-19T18:32:10,645 DEBUG [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e 2024-11-19T18:32:10,645 INFO [StoreOpener-63180f0cb91b87611df480c0f7ab38fd-1 {}] regionserver.HStore(327): Store=63180f0cb91b87611df480c0f7ab38fd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:32:10,645 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,646 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,647 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,647 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,647 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,649 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,650 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 32b10d34e9cd1a30469e17eeed3e0b1e/info of 32b10d34e9cd1a30469e17eeed3e0b1e into 32fb8658964849db8f6e9582229e8a69(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T18:32:10,650 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 32b10d34e9cd1a30469e17eeed3e0b1e: 2024-11-19T18:32:10,650 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e., storeName=32b10d34e9cd1a30469e17eeed3e0b1e/info, priority=15, startTime=1732041130610; duration=0sec 2024-11-19T18:32:10,650 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:10,650 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 32b10d34e9cd1a30469e17eeed3e0b1e:info 2024-11-19T18:32:10,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741852_1028 (size=9847) 2024-11-19T18:32:10,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741852_1028 (size=9847) 2024-11-19T18:32:10,651 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 63180f0cb91b87611df480c0f7ab38fd; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785267, jitterRate=-0.001481577754020691}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T18:32:10,651 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:10,651 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 
63180f0cb91b87611df480c0f7ab38fd: Running coprocessor pre-open hook at 1732041130613Writing region info on filesystem at 1732041130613Initializing all the Stores at 1732041130613Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041130613Cleaning up temporary data from old regions at 1732041130647 (+34 ms)Running coprocessor post-open hooks at 1732041130651 (+4 ms)Region opened successfully at 1732041130651 2024-11-19T18:32:10,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/2c67a0a0eaa443f5beec054ec2459a8d 2024-11-19T18:32:10,652 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., pid=13, masterSystemTime=1732041130587 2024-11-19T18:32:10,652 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:32:10,652 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:10,652 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T18:32:10,655 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:10,655 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files) 2024-11-19T18:32:10,655 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 
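The "Flushed memstore ... to=.../.tmp/..." and "Committing <tmp> as <final>" entries above follow the usual write-then-rename pattern: the new file is written completely under the region's .tmp directory and only becomes visible to readers when it is moved into the column-family directory. A minimal, hypothetical sketch of that pattern against the plain FileSystem API follows; the paths and payload are invented.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-.tmp-then-rename pattern behind the "Committing <tmp>
// as <final>" lines above: finish writing under .tmp, then rename into the
// column-family directory so readers never see a half-written file.
public class TmpCommitSketch {
  static Path commit(FileSystem fs, Path regionDir, String family,
                     String fileName, byte[] payload) throws IOException {
    Path tmp = new Path(new Path(regionDir, ".tmp"), fileName);
    Path dst = new Path(new Path(regionDir, family), fileName);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(payload);              // write the complete file before it is visible
    }
    if (!fs.rename(tmp, dst)) {        // the move is what "commits" the flush
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration()); // local filesystem for the demo
    Path regionDir = new Path("/tmp/demo-region");       // invented location
    fs.mkdirs(new Path(regionDir, "info"));
    fs.mkdirs(new Path(regionDir, ".tmp"));
    System.out.println(commit(fs, regionDir, "info", "example-hfile", new byte[]{1, 2, 3}));
  }
}
```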
2024-11-19T18:32:10,655 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-top, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=120.8 K 2024-11-19T18:32:10,655 DEBUG [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:10,655 INFO [RS_OPEN_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 
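The two "Opened ..." entries above print ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689809, jitterRate=-0.12286...} and {desiredMaxFileSize=785267, jitterRate=-0.00148...}. Both values are consistent with a base split size of 786,432 bytes (0.75 MB) scaled by (1 + jitterRate); the base is inferred from the arithmetic, not stated in the log. A quick check:

```java
// Back-of-the-envelope check of the two desiredMaxFileSize values in the log,
// assuming (inferred, not stated) a configured split size of 786,432 bytes and
// a multiplicative jitter: desiredMaxFileSize = base * (1 + jitterRate).
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    long base = 786_432L; // 0.75 MB, the assumed configured split size
    System.out.println(Math.round(base * (1 - 0.12286293506622314)));  // 689809, region 32b10d34...
    System.out.println(Math.round(base * (1 - 0.001481577754020691))); // 785267, region 63180f0c...
  }
}
```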
2024-11-19T18:32:10,656 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732041125789 2024-11-19T18:32:10,656 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=63180f0cb91b87611df480c0f7ab38fd, regionState=OPEN, openSeqNum=127, regionLocation=30db5f576be8,33931,1732041114788 2024-11-19T18:32:10,656 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732041129896 2024-11-19T18:32:10,657 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732041129919 2024-11-19T18:32:10,657 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732041129945 2024-11-19T18:32:10,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 because future has completed 2024-11-19T18:32:10,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-19T18:32:10,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 in 224 msec 2024-11-19T18:32:10,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-19T18:32:10,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=63180f0cb91b87611df480c0f7ab38fd, ASSIGN in 383 msec 2024-11-19T18:32:10,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=86f3d38b72b610f50be445126a88d870, daughterA=32b10d34e9cd1a30469e17eeed3e0b1e, daughterB=63180f0cb91b87611df480c0f7ab38fd in 702 msec 2024-11-19T18:32:10,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/ns/df3b3b1b32f14ed2a65651b103d5d3c3 is 43, key is default/ns:d/1732041115593/Put/seqid=0 2024-11-19T18:32:10,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741853_1029 (size=5153) 2024-11-19T18:32:10,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is 
added to blk_1073741853_1029 (size=5153) 2024-11-19T18:32:10,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/ns/df3b3b1b32f14ed2a65651b103d5d3c3 2024-11-19T18:32:10,685 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:10,685 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/0f34981b1d0740639cb50cc38fa0a395 is 1080, key is row0062/info:/1732041127889/Put/seqid=0 2024-11-19T18:32:10,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741854_1030 (size=43081) 2024-11-19T18:32:10,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741854_1030 (size=43081) 2024-11-19T18:32:10,695 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/0f34981b1d0740639cb50cc38fa0a395 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/0f34981b1d0740639cb50cc38fa0a395 2024-11-19T18:32:10,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:10,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:10,700 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into 0f34981b1d0740639cb50cc38fa0a395(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
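The Close-WAL-Writer WARNs above, and the repeats that follow, come from lease recovery on old WAL files: per the stack trace, RecoverLeaseFSUtils asks the NameNode to recover the lease and then probes DistributedFileSystem.isFileClosed(), and here every probe fails with "Filesystem closed" because the DFSClient for that cluster has already been shut down. A standalone sketch of that kind of retry loop follows, with invented timeouts, an invented path, and no reflection, so it is not the actual utility code.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of a WAL lease-recovery retry loop: ask the NameNode to recover the
// lease, then poll isFileClosed() until the file is closed or a deadline passes.
public class LeaseRecoverySketch {
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);      // ask the NameNode to take the lease back
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000L);                         // pause between probes
      try {
        recovered = dfs.isFileClosed(wal);          // the probe that throws in the log above
      } catch (IOException e) {
        // "Filesystem closed" lands here once the DFSClient has been shut down;
        // further probes cannot succeed, but the loop keeps trying until the deadline.
      }
      if (!recovered) {
        recovered = dfs.recoverLease(wal);
      }
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("hdfs://localhost:8020/example/wal"); // invented path
    FileSystem fs = FileSystem.get(URI.create(wal.toString()), conf);
    if (fs instanceof DistributedFileSystem) {
      System.out.println(recoverLease((DistributedFileSystem) fs, wal, 60_000L));
    }
  }
}
```

Because the underlying DFSClient is already closed, every probe keeps throwing, which is why the same stack trace recurs roughly once per second in the entries below until the close-writer task gives up.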
2024-11-19T18:32:10,700 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:10,700 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=12, startTime=1732041130652; duration=0sec 2024-11-19T18:32:10,700 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:10,700 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info 2024-11-19T18:32:10,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/table/fa70bd024f8c42be9b3d202384b5a775 is 65, key is TestLogRolling-testLogRolling/table:state/1732041116078/Put/seqid=0 2024-11-19T18:32:10,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741855_1031 (size=5340) 2024-11-19T18:32:10,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741855_1031 (size=5340) 2024-11-19T18:32:10,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/table/fa70bd024f8c42be9b3d202384b5a775 2024-11-19T18:32:10,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/2c67a0a0eaa443f5beec054ec2459a8d as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/info/2c67a0a0eaa443f5beec054ec2459a8d 2024-11-19T18:32:10,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/info/2c67a0a0eaa443f5beec054ec2459a8d, entries=30, sequenceid=17, filesize=9.6 K 2024-11-19T18:32:10,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/ns/df3b3b1b32f14ed2a65651b103d5d3c3 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/ns/df3b3b1b32f14ed2a65651b103d5d3c3 2024-11-19T18:32:10,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/ns/df3b3b1b32f14ed2a65651b103d5d3c3, entries=2, sequenceid=17, filesize=5.0 K 2024-11-19T18:32:10,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/table/fa70bd024f8c42be9b3d202384b5a775 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/table/fa70bd024f8c42be9b3d202384b5a775 2024-11-19T18:32:10,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/table/fa70bd024f8c42be9b3d202384b5a775, entries=2, sequenceid=17, filesize=5.2 K 2024-11-19T18:32:10,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 118ms, sequenceid=17, compaction requested=false 2024-11-19T18:32:10,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T18:32:11,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:11,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:11,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38540 deadline: 1732041141949, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. is not online on 30db5f576be8,33931,1732041114788 2024-11-19T18:32:11,973 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., hostname=30db5f576be8,33931,1732041114788, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., hostname=30db5f576be8,33931,1732041114788, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. 
is not online on 30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:11,974 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., hostname=30db5f576be8,33931,1732041114788, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870. is not online on 30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:11,974 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732041115716.86f3d38b72b610f50be445126a88d870., hostname=30db5f576be8,33931,1732041114788, seqNum=2 from cache 2024-11-19T18:32:12,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:12,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:13,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:13,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:14,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:14,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:15,675 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T18:32:15,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:15,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:15,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:15,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T18:32:15,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:15,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:15,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:15,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-19T18:32:16,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:16,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:17,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:17,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:18,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:18,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:19,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:19,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:20,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:20,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:21,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:21,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:22,038 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127]
2024-11-19T18:32:22,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:22,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-19T18:32:22,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7e8c1565a1e24b5e87646125cfe55d69 is 1080, key is row0097/info:/1732041142039/Put/seqid=0
2024-11-19T18:32:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741856_1032 (size=12516)
2024-11-19T18:32:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741856_1032 (size=12516)
2024-11-19T18:32:22,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7e8c1565a1e24b5e87646125cfe55d69
2024-11-19T18:32:22,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7e8c1565a1e24b5e87646125cfe55d69 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69
2024-11-19T18:32:22,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69, entries=7, sequenceid=137, filesize=12.2 K
2024-11-19T18:32:22,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 63180f0cb91b87611df480c0f7ab38fd in 22ms, sequenceid=137, compaction requested=false
2024-11-19T18:32:22,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:22,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-19T18:32:22,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6d4bdbcaaa9a42aba8296e2777d4e8e5 is 1080, key is row0104/info:/1732041142049/Put/seqid=0
2024-11-19T18:32:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741857_1033 (size=20078)
2024-11-19T18:32:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741857_1033 (size=20078)
2024-11-19T18:32:22,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6d4bdbcaaa9a42aba8296e2777d4e8e5
2024-11-19T18:32:22,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6d4bdbcaaa9a42aba8296e2777d4e8e5 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5
2024-11-19T18:32:22,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5, entries=14, sequenceid=154, filesize=19.6 K
2024-11-19T18:32:22,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 63180f0cb91b87611df480c0f7ab38fd in 20ms, sequenceid=154, compaction requested=true
2024-11-19T18:32:22,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:22,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T18:32:22,092 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T18:32:22,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:22,093 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75675 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T18:32:22,093 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files)
2024-11-19T18:32:22,093 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.
2024-11-19T18:32:22,094 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/0f34981b1d0740639cb50cc38fa0a395, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=73.9 K
2024-11-19T18:32:22,094 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f34981b1d0740639cb50cc38fa0a395, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732041127889
2024-11-19T18:32:22,094 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e8c1565a1e24b5e87646125cfe55d69, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732041142039
2024-11-19T18:32:22,095 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6d4bdbcaaa9a42aba8296e2777d4e8e5, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732041142049
2024-11-19T18:32:22,105 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#72 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T18:32:22,106 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/df051ce6308042ee8dfba0b8e0e738cc is 1080, key is row0062/info:/1732041127889/Put/seqid=0
2024-11-19T18:32:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741858_1034 (size=65889)
2024-11-19T18:32:22,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741858_1034 (size=65889)
2024-11-19T18:32:22,116 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/df051ce6308042ee8dfba0b8e0e738cc as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/df051ce6308042ee8dfba0b8e0e738cc
2024-11-19T18:32:22,120 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into df051ce6308042ee8dfba0b8e0e738cc(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T18:32:22,121 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:22,121 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041142092; duration=0sec
2024-11-19T18:32:22,121 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:22,121 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info
2024-11-19T18:32:22,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:22,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:23,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:23,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
2024-11-19T18:32:24,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:24,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-19T18:32:24,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7c8350f857e94069946a1d6c6b63bfb8 is 1080, key is row0118/info:/1732041142073/Put/seqid=0
2024-11-19T18:32:24,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741859_1035 (size=17906)
2024-11-19T18:32:24,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741859_1035 (size=17906)
2024-11-19T18:32:24,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7c8350f857e94069946a1d6c6b63bfb8
2024-11-19T18:32:24,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/7c8350f857e94069946a1d6c6b63bfb8 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8
2024-11-19T18:32:24,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8, entries=12, sequenceid=170, filesize=17.5 K
2024-11-19T18:32:24,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 22ms, sequenceid=170, compaction requested=false
2024-11-19T18:32:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:24,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T18:32:24,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/621fb28a346f41848c764072ad0fde56 is 1080, key is row0130/info:/1732041144093/Put/seqid=0
2024-11-19T18:32:24,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741860_1036 (size=21156)
2024-11-19T18:32:24,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741860_1036 (size=21156)
2024-11-19T18:32:24,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/621fb28a346f41848c764072ad0fde56
2024-11-19T18:32:24,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/621fb28a346f41848c764072ad0fde56 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56
2024-11-19T18:32:24,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56, entries=15, sequenceid=188, filesize=20.7 K
2024-11-19T18:32:24,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-19T18:32:24,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38540 deadline: 1732041154138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788
2024-11-19T18:32:24,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 24ms, sequenceid=188, compaction requested=true
2024-11-19T18:32:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T18:32:24,139 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:24,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:24,140 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:32:24,140 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:24,140 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 because the exception is null or not the one we care about 2024-11-19T18:32:24,141 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104951 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:32:24,141 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files) 2024-11-19T18:32:24,141 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 
2024-11-19T18:32:24,141 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/df051ce6308042ee8dfba0b8e0e738cc, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=102.5 K 2024-11-19T18:32:24,141 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting df051ce6308042ee8dfba0b8e0e738cc, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732041127889 2024-11-19T18:32:24,142 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c8350f857e94069946a1d6c6b63bfb8, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732041142073 2024-11-19T18:32:24,142 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 621fb28a346f41848c764072ad0fde56, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732041144093 2024-11-19T18:32:24,169 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#75 average throughput is 4.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:24,170 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/bf9894b427aa43ecb41a771df353b230 is 1080, key is row0062/info:/1732041127889/Put/seqid=0 2024-11-19T18:32:24,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741861_1037 (size=95174) 2024-11-19T18:32:24,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741861_1037 (size=95174) 2024-11-19T18:32:24,189 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/bf9894b427aa43ecb41a771df353b230 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/bf9894b427aa43ecb41a771df353b230 2024-11-19T18:32:24,195 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into bf9894b427aa43ecb41a771df353b230(size=92.9 K), total size for store is 92.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T18:32:24,195 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:24,195 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041144139; duration=0sec 2024-11-19T18:32:24,195 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:24,195 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info 2024-11-19T18:32:24,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:24,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:24,723 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T18:32:25,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:25,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:26,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:26,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:27,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:27,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:28,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:28,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:29,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:29,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:30,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:30,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:31,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:31,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:32,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:32,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:33,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:33,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:34,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:34,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T18:32:34,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/9c3ec16da3e44ba39c2de0b77a09e159 is 1080, key is row0145/info:/1732041144115/Put/seqid=0 2024-11-19T18:32:34,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741862_1038 (size=21156) 2024-11-19T18:32:34,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741862_1038 (size=21156) 2024-11-19T18:32:34,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/9c3ec16da3e44ba39c2de0b77a09e159 2024-11-19T18:32:34,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/9c3ec16da3e44ba39c2de0b77a09e159 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159 2024-11-19T18:32:34,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159, entries=15, sequenceid=207, filesize=20.7 K 2024-11-19T18:32:34,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for 63180f0cb91b87611df480c0f7ab38fd in 19ms, sequenceid=207, compaction requested=false 2024-11-19T18:32:34,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:34,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:34,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:35,607 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T18:32:35,607 INFO [master/30db5f576be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T18:32:35,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:35,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:36,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:36,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T18:32:36,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6ada1560a1c940c1a969c4d63ac68c0b is 1080, key is row0160/info:/1732041154195/Put/seqid=0 2024-11-19T18:32:36,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741863_1039 (size=12516) 2024-11-19T18:32:36,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741863_1039 (size=12516) 2024-11-19T18:32:36,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6ada1560a1c940c1a969c4d63ac68c0b 2024-11-19T18:32:36,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/6ada1560a1c940c1a969c4d63ac68c0b as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b 2024-11-19T18:32:36,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b, entries=7, sequenceid=217, filesize=12.2 K 2024-11-19T18:32:36,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 24ms, sequenceid=217, compaction requested=true 2024-11-19T18:32:36,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:36,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:32:36,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:36,229 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:32:36,230 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-19T18:32:36,230 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files) 2024-11-19T18:32:36,230 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:36,230 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/bf9894b427aa43ecb41a771df353b230, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=125.8 K 2024-11-19T18:32:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:36,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-19T18:32:36,230 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf9894b427aa43ecb41a771df353b230, keycount=83, bloomtype=ROW, size=92.9 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732041127889 2024-11-19T18:32:36,231 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c3ec16da3e44ba39c2de0b77a09e159, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732041144115 2024-11-19T18:32:36,231 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ada1560a1c940c1a969c4d63ac68c0b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732041154195 2024-11-19T18:32:36,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/18181b1f504e4aa89b6642002f1ad748 is 1080, key is row0167/info:/1732041156206/Put/seqid=0 2024-11-19T18:32:36,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741864_1040 (size=22238) 2024-11-19T18:32:36,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741864_1040 (size=22238) 2024-11-19T18:32:36,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=236 (bloomFilter=true), 
to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/18181b1f504e4aa89b6642002f1ad748 2024-11-19T18:32:36,244 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#79 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:36,244 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/4820acf8b4cd4a1e891c24260a13e48f is 1080, key is row0062/info:/1732041127889/Put/seqid=0 2024-11-19T18:32:36,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/18181b1f504e4aa89b6642002f1ad748 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748 2024-11-19T18:32:36,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741865_1041 (size=118996) 2024-11-19T18:32:36,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741865_1041 (size=118996) 2024-11-19T18:32:36,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748, entries=16, sequenceid=236, filesize=21.7 K 2024-11-19T18:32:36,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=10.51 KB/10760 for 63180f0cb91b87611df480c0f7ab38fd in 24ms, sequenceid=236, compaction requested=false 2024-11-19T18:32:36,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:36,256 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/4820acf8b4cd4a1e891c24260a13e48f as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4820acf8b4cd4a1e891c24260a13e48f 2024-11-19T18:32:36,261 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into 4820acf8b4cd4a1e891c24260a13e48f(size=116.2 K), total size for store is 137.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T18:32:36,261 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:36,261 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041156229; duration=0sec 2024-11-19T18:32:36,261 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:36,261 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info 2024-11-19T18:32:36,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:36,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:37,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:37,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:38,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T18:32:38,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/ee89f18ac8d14596bb8249ed5799e983 is 1080, key is row0183/info:/1732041156231/Put/seqid=0 2024-11-19T18:32:38,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741866_1042 (size=16828) 2024-11-19T18:32:38,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741866_1042 (size=16828) 2024-11-19T18:32:38,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/ee89f18ac8d14596bb8249ed5799e983 2024-11-19T18:32:38,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/ee89f18ac8d14596bb8249ed5799e983 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983 2024-11-19T18:32:38,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983, entries=11, sequenceid=251, filesize=16.4 K 2024-11-19T18:32:38,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 24ms, sequenceid=251, compaction requested=true 2024-11-19T18:32:38,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:38,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T18:32:38,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:38,277 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T18:32:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:38,278 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T18:32:38,278 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158062 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T18:32:38,278 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files) 2024-11-19T18:32:38,278 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:38,278 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4820acf8b4cd4a1e891c24260a13e48f, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=154.4 K 2024-11-19T18:32:38,279 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4820acf8b4cd4a1e891c24260a13e48f, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732041127889 2024-11-19T18:32:38,279 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18181b1f504e4aa89b6642002f1ad748, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732041156206 2024-11-19T18:32:38,280 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting ee89f18ac8d14596bb8249ed5799e983, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732041156231 2024-11-19T18:32:38,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/1130d024e3ce4b198158fd3e8e8e4b13 is 1080, key is row0194/info:/1732041158254/Put/seqid=0 2024-11-19T18:32:38,295 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#82 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T18:32:38,296 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/4da614a82bb64c9fa76627a68a54be7e is 1080, key is row0062/info:/1732041127889/Put/seqid=0 2024-11-19T18:32:38,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741867_1043 (size=21168) 2024-11-19T18:32:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741867_1043 (size=21168) 2024-11-19T18:32:38,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/1130d024e3ce4b198158fd3e8e8e4b13 2024-11-19T18:32:38,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-19T18:32:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38540 deadline: 1732041168302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 2024-11-19T18:32:38,303 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:38,303 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=63180f0cb91b87611df480c0f7ab38fd, server=30db5f576be8,33931,1732041114788 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T18:32:38,303 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., hostname=30db5f576be8,33931,1732041114788, seqNum=127 because the exception is null or not the one we care about 2024-11-19T18:32:38,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/1130d024e3ce4b198158fd3e8e8e4b13 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13 2024-11-19T18:32:38,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741868_1044 (size=148409) 2024-11-19T18:32:38,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741868_1044 (size=148409) 2024-11-19T18:32:38,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13, entries=15, sequenceid=269, filesize=20.7 K 2024-11-19T18:32:38,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 34ms, sequenceid=269, compaction requested=false 2024-11-19T18:32:38,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:38,314 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/4da614a82bb64c9fa76627a68a54be7e as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4da614a82bb64c9fa76627a68a54be7e 2024-11-19T18:32:38,320 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into 4da614a82bb64c9fa76627a68a54be7e(size=144.9 K), total size for store is 165.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T18:32:38,320 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:38,320 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041158277; duration=0sec 2024-11-19T18:32:38,320 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T18:32:38,320 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info 2024-11-19T18:32:38,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T18:32:38,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T18:32:39,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:39,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:40,563 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340
2024-11-19T18:32:40,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:40,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:41,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:41,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:42,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:42,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:43,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:43,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:44,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:44,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:45,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:45,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:46,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:46,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:47,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:47,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
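The warnings above all come from one retry loop: the Close-WAL-Writer-0 thread keeps asking the NameNode whether the old WAL file is closed, the check is made reflectively (hence the InvocationTargetException wrapper in the trace), and every probe fails because the DFSClient behind the FileSystem has apparently already been shut down by the test, so the cause is always "Filesystem closed". The sketch below is a minimal, hypothetical version of such a reflective probe, not the actual RecoverLeaseFSUtils code; everything except the public Hadoop FileSystem/Path API is made up for illustration.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of a reflective isFileClosed() probe like the one the
// stack trace above reports. DistributedFileSystem#isFileClosed is looked up
// reflectively (presumably for compatibility with older Hadoop clients); once
// the client is closed, every call surfaces as an InvocationTargetException
// whose cause is java.io.IOException("Filesystem closed").
final class IsFileClosedProbe {

  /**
   * Returns true only if the filesystem exposes isFileClosed(Path) and the
   * NameNode reports the file as closed; any reflection failure (method
   * missing, client already closed, ...) is treated as "not closed yet",
   * which is why the close-writer thread keeps retrying once per second.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // With a closed client the cause here is the "Filesystem closed" IOException.
      return false;
    }
  }
}
```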
2024-11-19T18:32:48,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:48,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T18:32:48,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c0a2fc1b0522444f96c8533edf061736 is 1080, key is row0209/info:/1732041158278/Put/seqid=0
2024-11-19T18:32:48,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741869_1045 (size=21171)
2024-11-19T18:32:48,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741869_1045 (size=21171)
2024-11-19T18:32:48,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c0a2fc1b0522444f96c8533edf061736
2024-11-19T18:32:48,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c0a2fc1b0522444f96c8533edf061736 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736
2024-11-19T18:32:48,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736, entries=15, sequenceid=288, filesize=20.7 K
2024-11-19T18:32:48,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for 63180f0cb91b87611df480c0f7ab38fd in 22ms, sequenceid=288, compaction requested=true
2024-11-19T18:32:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T18:32:48,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:48,385 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T18:32:48,386 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190748 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T18:32:48,386 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files)
2024-11-19T18:32:48,386 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.
2024-11-19T18:32:48,386 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4da614a82bb64c9fa76627a68a54be7e, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=186.3 K
2024-11-19T18:32:48,387 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4da614a82bb64c9fa76627a68a54be7e, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732041127889
2024-11-19T18:32:48,387 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1130d024e3ce4b198158fd3e8e8e4b13, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732041158254
2024-11-19T18:32:48,387 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting c0a2fc1b0522444f96c8533edf061736, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732041158278
2024-11-19T18:32:48,398 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#84 average throughput is 83.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T18:32:48,398 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/e5df61f8034c4ab89341ff987b847d9c is 1080, key is row0062/info:/1732041127889/Put/seqid=0
2024-11-19T18:32:48,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741870_1046 (size=180886)
2024-11-19T18:32:48,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741870_1046 (size=180886)
2024-11-19T18:32:48,407 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/e5df61f8034c4ab89341ff987b847d9c as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/e5df61f8034c4ab89341ff987b847d9c
2024-11-19T18:32:48,413 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into e5df61f8034c4ab89341ff987b847d9c(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T18:32:48,413 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:48,413 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041168385; duration=0sec
2024-11-19T18:32:48,413 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:48,413 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info
2024-11-19T18:32:48,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
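The 18:32:48 entries above show the usual flush/compaction cycle: the memstore is written to a new HFile under the region's .tmp directory, the file is then committed (renamed) into the info store directory, and the store immediately qualifies for a minor compaction that rewrites the three accumulated files into one. Below is a minimal sketch of the ".tmp then rename" commit step using only the public Hadoop FileSystem API; it is an illustration under that assumption, not the actual HRegionFileSystem code.

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustration of the "write under .tmp, then rename into the store" pattern
// reported by the Committing/Added lines above. Because the rename happens
// only after the file is fully written, readers of the info/ directory never
// see a partially flushed HFile. Names and error handling are simplified.
final class TmpThenCommit {

  static Path commitStoreFile(FileSystem fs, Path tmpHFile, Path storeDir) throws IOException {
    Path committed = new Path(storeDir, tmpHFile.getName());
    // On HDFS a rename within one namespace appears to readers in a single step.
    if (!fs.rename(tmpHFile, committed)) {
      throw new IOException("Failed to commit " + tmpHFile + " as " + committed);
    }
    return committed;
  }
}
```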
2024-11-19T18:32:48,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:49,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-19T18:32:49,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
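Both compactions in this stretch, the 18:32:48 run above and the 18:32:50 run that follows, report their rate against the 50.00 MB/second ceiling of the PressureAwareThroughputController; with this little data neither run had to sleep. The snippet below is only a toy of the general idea (report bytes written, sleep when the running average exceeds a ceiling) and is hypothetical, not the actual HBase controller.

```java
// Toy throughput limiter in the spirit of the PressureAwareThroughputController
// lines in this log: after each chunk the caller reports how many bytes were
// written, and the limiter sleeps until the running average drops back under
// the configured ceiling. Simplified and hypothetical, not the HBase class.
final class SimpleThroughputLimiter {

  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long totalBytes;

  SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Record newBytes written and sleep if the average rate is above the limit. */
  synchronized void control(long newBytes) throws InterruptedException {
    totalBytes += newBytes;
    double elapsedSeconds = (System.nanoTime() - startNanos) / 1_000_000_000.0;
    double earliestAllowedSeconds = totalBytes / maxBytesPerSecond;
    long sleepMillis = (long) ((earliestAllowedSeconds - elapsedSeconds) * 1000);
    if (sleepMillis > 0) {
      Thread.sleep(sleepMillis); // what the log reports as "slept N time(s)"
    }
  }
}
```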
2024-11-19T18:32:50,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:50,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-19T18:32:50,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/766f3e8989764e07a7113904ea2a1f12 is 1080, key is row0224/info:/1732041168365/Put/seqid=0
2024-11-19T18:32:50,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741871_1047 (size=12523)
2024-11-19T18:32:50,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741871_1047 (size=12523)
2024-11-19T18:32:50,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/766f3e8989764e07a7113904ea2a1f12
2024-11-19T18:32:50,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/766f3e8989764e07a7113904ea2a1f12 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12
2024-11-19T18:32:50,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12, entries=7, sequenceid=299, filesize=12.2 K
2024-11-19T18:32:50,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 63180f0cb91b87611df480c0f7ab38fd in 21ms, sequenceid=299, compaction requested=false
2024-11-19T18:32:50,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:50,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33931 {}] regionserver.HRegion(8855): Flush requested on 63180f0cb91b87611df480c0f7ab38fd
2024-11-19T18:32:50,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T18:32:50,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/8d1297aa365b454eba2b36a016f29d89 is 1080, key is row0231/info:/1732041170375/Put/seqid=0
2024-11-19T18:32:50,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741872_1048 (size=21171)
2024-11-19T18:32:50,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741872_1048 (size=21171)
2024-11-19T18:32:50,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/8d1297aa365b454eba2b36a016f29d89
2024-11-19T18:32:50,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/8d1297aa365b454eba2b36a016f29d89 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89
2024-11-19T18:32:50,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89, entries=15, sequenceid=317, filesize=20.7 K
2024-11-19T18:32:50,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 63180f0cb91b87611df480c0f7ab38fd in 18ms, sequenceid=317, compaction requested=true
2024-11-19T18:32:50,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:50,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 63180f0cb91b87611df480c0f7ab38fd:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T18:32:50,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:50,415 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T18:32:50,415 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214580 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T18:32:50,415 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1541): 63180f0cb91b87611df480c0f7ab38fd/info is initiating minor compaction (all files)
2024-11-19T18:32:50,416 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 63180f0cb91b87611df480c0f7ab38fd/info in TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.
2024-11-19T18:32:50,416 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/e5df61f8034c4ab89341ff987b847d9c, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89] into tmpdir=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp, totalSize=209.6 K
2024-11-19T18:32:50,416 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5df61f8034c4ab89341ff987b847d9c, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732041127889
2024-11-19T18:32:50,417 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 766f3e8989764e07a7113904ea2a1f12, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732041168365
2024-11-19T18:32:50,417 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d1297aa365b454eba2b36a016f29d89, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732041170375
2024-11-19T18:32:50,428 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 63180f0cb91b87611df480c0f7ab38fd#info#compaction#87 average throughput is 62.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T18:32:50,429 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/5e4f2fe3805d4586afd22942ca83f428 is 1080, key is row0062/info:/1732041127889/Put/seqid=0
2024-11-19T18:32:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741873_1049 (size=204803)
2024-11-19T18:32:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741873_1049 (size=204803)
2024-11-19T18:32:50,436 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/5e4f2fe3805d4586afd22942ca83f428 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/5e4f2fe3805d4586afd22942ca83f428
2024-11-19T18:32:50,441 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63180f0cb91b87611df480c0f7ab38fd/info of 63180f0cb91b87611df480c0f7ab38fd into 5e4f2fe3805d4586afd22942ca83f428(size=200.0 K), total size for store is 200.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T18:32:50,441 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 63180f0cb91b87611df480c0f7ab38fd:
2024-11-19T18:32:50,441 INFO [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., storeName=63180f0cb91b87611df480c0f7ab38fd/info, priority=13, startTime=1732041170415; duration=0sec
2024-11-19T18:32:50,441 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T18:32:50,441 DEBUG [RS:0;30db5f576be8:33931-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 63180f0cb91b87611df480c0f7ab38fd:info
2024-11-19T18:32:50,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:50,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:51,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:51,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:52,413 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-19T18:32:52,413 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C33931%2C1732041114788.1732041172413 2024-11-19T18:32:52,419 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,419 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,419 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,419 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,419 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,420 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041115172 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041172413 2024-11-19T18:32:52,421 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35375:35375),(127.0.0.1/127.0.0.1:43735:43735)] 2024-11-19T18:32:52,421 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041115172 is not closed yet, will try archiving it next time 2024-11-19T18:32:52,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741833_1009 (size=315283) 2024-11-19T18:32:52,422 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741833_1009 (size=315283) 2024-11-19T18:32:52,424 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 63180f0cb91b87611df480c0f7ab38fd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T18:32:52,428 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c87d7ccb11744e0c940c2b4f02a03d6c is 1080, key is row0246/info:/1732041170397/Put/seqid=0 2024-11-19T18:32:52,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741875_1051 (size=16839) 2024-11-19T18:32:52,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741875_1051 (size=16839) 2024-11-19T18:32:52,432 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c87d7ccb11744e0c940c2b4f02a03d6c 2024-11-19T18:32:52,437 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/.tmp/info/c87d7ccb11744e0c940c2b4f02a03d6c as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c87d7ccb11744e0c940c2b4f02a03d6c 2024-11-19T18:32:52,442 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c87d7ccb11744e0c940c2b4f02a03d6c, entries=11, sequenceid=332, filesize=16.4 K 2024-11-19T18:32:52,443 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 63180f0cb91b87611df480c0f7ab38fd in 19ms, sequenceid=332, compaction requested=false 2024-11-19T18:32:52,443 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 63180f0cb91b87611df480c0f7ab38fd: 2024-11-19T18:32:52,443 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-19T18:32:52,447 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/df5e3e4b433646dcadc864a6031f3bee is 193, key is TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd./info:regioninfo/1732041130656/Put/seqid=0 2024-11-19T18:32:52,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741876_1052 (size=6223) 2024-11-19T18:32:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741876_1052 (size=6223) 
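[Editor's aside] The flush entries in this stretch show the usual memstore flush shape: data is written to an HFile under the region's .tmp directory, committed into the info store, and recorded at a new sequence id. Here the flushes are driven by memstore pressure and by the test harness itself; purely as an illustrative sketch, a client could request the same kind of flush through the public Admin API. The table name is taken from the log; nothing below is the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the region servers to flush this table's memstores to HFiles,
          // the same operation the MemStoreFlusher entries above record.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
        }
      }
    }
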
2024-11-19T18:32:52,451 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/df5e3e4b433646dcadc864a6031f3bee 2024-11-19T18:32:52,455 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/.tmp/info/df5e3e4b433646dcadc864a6031f3bee as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/info/df5e3e4b433646dcadc864a6031f3bee 2024-11-19T18:32:52,459 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/info/df5e3e4b433646dcadc864a6031f3bee, entries=5, sequenceid=21, filesize=6.1 K 2024-11-19T18:32:52,460 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false 2024-11-19T18:32:52,460 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T18:32:52,460 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 32b10d34e9cd1a30469e17eeed3e0b1e: 2024-11-19T18:32:52,461 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C33931%2C1732041114788.1732041172461 2024-11-19T18:32:52,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,465 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,465 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,465 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041172413 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041172461 2024-11-19T18:32:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741874_1050 (size=731) 2024-11-19T18:32:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741874_1050 (size=731) 2024-11-19T18:32:52,470 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041115172 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs/30db5f576be8%2C33931%2C1732041114788.1732041115172 2024-11-19T18:32:52,471 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/WALs/30db5f576be8,33931,1732041114788/30db5f576be8%2C33931%2C1732041114788.1732041172413 to 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs/30db5f576be8%2C33931%2C1732041114788.1732041172413 2024-11-19T18:32:52,472 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35375:35375),(127.0.0.1/127.0.0.1:43735:43735)] 2024-11-19T18:32:52,473 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T18:32:52,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:32:52,473 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:32:52,473 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:52,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:52,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:52,473 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T18:32:52,474 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:32:52,474 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1453087130, stopped=false 2024-11-19T18:32:52,474 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,44151,1732041114739 2024-11-19T18:32:52,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:52,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:52,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:52,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:52,475 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:32:52,476 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
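[Editor's aside] The call stack above records the JUnit teardown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection before stopping the minicluster, all under RunAfters/FailOnTimeout. A minimal sketch of that lifecycle shape is below; the startMiniCluster call and the class layout are assumptions based on the trace and HBase's test utilities, not the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster(); // assumed: starts in-process DFS, ZooKeeper, and HBase
      }

      @Test
      public void clusterIsUp() throws Exception {
        // Tests such as AbstractTestLogRolling write data and roll WALs here.
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors the shutdownMiniCluster call visible in the stack trace above:
        // it closes the cached connection and stops the HBase and DFS miniclusters.
        util.shutdownMiniCluster();
      }
    }
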
2024-11-19T18:32:52,476 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:52,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:52,476 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,33931,1732041114788' ***** 2024-11-19T18:32:52,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:52,476 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:32:52,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:32:52,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(3091): Received CLOSE for 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(3091): Received CLOSE for 32b10d34e9cd1a30469e17eeed3e0b1e 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,33931,1732041114788 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:33931. 
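[Editor's aside] The ZKWatcher entries just above show shutdown propagating through ZooKeeper: the /hbase/running znode is deleted, each watcher receives a NodeDeleted event, and then re-sets a watch on the now-absent path. A minimal plain-ZooKeeper sketch of that watch pattern follows; the quorum string and timeout are placeholders, and this is not HBase's ZKWatcher code.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // A watch fires once; HBase re-registers after every event, as the log shows.
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            deleted.countDown(); // treat deletion of /hbase/running as "cluster shutting down"
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher); // placeholder quorum
        zk.exists("/hbase/running", true); // sets a watch whether or not the znode exists yet
        deleted.await();
        zk.close();
      }
    }
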
2024-11-19T18:32:52,477 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 63180f0cb91b87611df480c0f7ab38fd, disabling compactions & flushes 2024-11-19T18:32:52,477 DEBUG [RS:0;30db5f576be8:33931 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:52,477 DEBUG [RS:0;30db5f576be8:33931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:52,477 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:52,477 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:52,477 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. after waiting 0 ms 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:32:52,477 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
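[Editor's aside] The "Waiting for Split Thread / Large Compaction Thread / Small Compaction Thread to finish..." lines come from CompactSplit draining its worker pools while the region server stops. Purely as a plain-JDK analogue of that drain-then-stop pattern, and not HBase's CompactSplit implementation:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class DrainPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        ExecutorService compactions = Executors.newFixedThreadPool(2);
        compactions.submit(() -> System.out.println("pretend compaction running"));

        // Stop accepting new work, then wait a bounded time for in-flight work,
        // the same shape as "Waiting for Small Compaction Thread to finish...".
        compactions.shutdown();
        if (!compactions.awaitTermination(60, TimeUnit.SECONDS)) {
          compactions.shutdownNow(); // give up waiting and interrupt whatever is left
        }
      }
    }
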
2024-11-19T18:32:52,477 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:32:52,478 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-19T18:32:52,478 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1325): Online Regions={63180f0cb91b87611df480c0f7ab38fd=TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd., 1588230740=hbase:meta,,1.1588230740, 32b10d34e9cd1a30469e17eeed3e0b1e=TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.} 2024-11-19T18:32:52,478 DEBUG [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 32b10d34e9cd1a30469e17eeed3e0b1e, 63180f0cb91b87611df480c0f7ab38fd 2024-11-19T18:32:52,478 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:32:52,478 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:32:52,478 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:32:52,478 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:32:52,478 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:32:52,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-top, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/0f34981b1d0740639cb50cc38fa0a395, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69, 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/df051ce6308042ee8dfba0b8e0e738cc, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/bf9894b427aa43ecb41a771df353b230, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4820acf8b4cd4a1e891c24260a13e48f, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4da614a82bb64c9fa76627a68a54be7e, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/e5df61f8034c4ab89341ff987b847d9c, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89] to archive 2024-11-19T18:32:52,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
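[Editor's aside] The HFileArchiver entries around this point show each obsolete store file being moved from the region's data directory into the parallel archive/ tree as the store closes. A simplified Hadoop FileSystem sketch of that move is below; the paths are placeholders, and HFileArchiver itself additionally handles retries, name collisions, and the quota reporting whose failure appears a few entries later.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // expects fs.defaultFS to point at the HDFS in use
        FileSystem fs = FileSystem.get(conf);

        Path storeDir = new Path("/hbase/data/default/SomeTable/region/info");           // placeholder
        Path archiveDir = new Path("/hbase/archive/data/default/SomeTable/region/info"); // placeholder
        fs.mkdirs(archiveDir);

        for (FileStatus file : fs.listStatus(storeDir)) {
          // On a single HDFS, archiving is a metadata-only rename; blocks are not copied.
          Path target = new Path(archiveDir, file.getPath().getName());
          if (!fs.rename(file.getPath(), target)) {
            throw new IOException("could not archive " + file.getPath());
          }
        }
        fs.close();
      }
    }
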
2024-11-19T18:32:52,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:52,482 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-19T18:32:52,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-891d4b1623c249b58b05408186d1bfb0 2024-11-19T18:32:52,482 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:32:52,482 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:32:52,483 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041172478Running coprocessor pre-close hooks at 1732041172478Disabling compacts and flushes for region at 1732041172478Disabling writes for close at 1732041172478Writing region close event to WAL at 1732041172479 (+1 ms)Running coprocessor post-close hooks at 1732041172482 (+3 ms)Closed at 1732041172482 2024-11-19T18:32:52,483 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:32:52,483 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-af46b3d584e14931a88ad6eee1670033 2024-11-19T18:32:52,484 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/0f34981b1d0740639cb50cc38fa0a395 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/0f34981b1d0740639cb50cc38fa0a395 2024-11-19T18:32:52,485 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/TestLogRolling-testLogRolling=86f3d38b72b610f50be445126a88d870-f19fde26f8034f8fbb2ebeb3cb89466e 2024-11-19T18:32:52,486 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7e8c1565a1e24b5e87646125cfe55d69 2024-11-19T18:32:52,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/df051ce6308042ee8dfba0b8e0e738cc to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/df051ce6308042ee8dfba0b8e0e738cc 2024-11-19T18:32:52,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6d4bdbcaaa9a42aba8296e2777d4e8e5 2024-11-19T18:32:52,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8 to 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/7c8350f857e94069946a1d6c6b63bfb8 2024-11-19T18:32:52,489 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/bf9894b427aa43ecb41a771df353b230 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/bf9894b427aa43ecb41a771df353b230 2024-11-19T18:32:52,490 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/621fb28a346f41848c764072ad0fde56 2024-11-19T18:32:52,491 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/9c3ec16da3e44ba39c2de0b77a09e159 2024-11-19T18:32:52,492 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4820acf8b4cd4a1e891c24260a13e48f to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4820acf8b4cd4a1e891c24260a13e48f 2024-11-19T18:32:52,493 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/6ada1560a1c940c1a969c4d63ac68c0b 2024-11-19T18:32:52,494 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/18181b1f504e4aa89b6642002f1ad748 2024-11-19T18:32:52,495 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4da614a82bb64c9fa76627a68a54be7e to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/4da614a82bb64c9fa76627a68a54be7e 2024-11-19T18:32:52,496 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/ee89f18ac8d14596bb8249ed5799e983 2024-11-19T18:32:52,497 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/1130d024e3ce4b198158fd3e8e8e4b13 2024-11-19T18:32:52,498 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/e5df61f8034c4ab89341ff987b847d9c to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/e5df61f8034c4ab89341ff987b847d9c 2024-11-19T18:32:52,499 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/c0a2fc1b0522444f96c8533edf061736 2024-11-19T18:32:52,500 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/766f3e8989764e07a7113904ea2a1f12 2024-11-19T18:32:52,501 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/info/8d1297aa365b454eba2b36a016f29d89 2024-11-19T18:32:52,502 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30db5f576be8:44151 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-19T18:32:52,502 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0f34981b1d0740639cb50cc38fa0a395=43081, 7e8c1565a1e24b5e87646125cfe55d69=12516, df051ce6308042ee8dfba0b8e0e738cc=65889, 6d4bdbcaaa9a42aba8296e2777d4e8e5=20078, 7c8350f857e94069946a1d6c6b63bfb8=17906, bf9894b427aa43ecb41a771df353b230=95174, 621fb28a346f41848c764072ad0fde56=21156, 9c3ec16da3e44ba39c2de0b77a09e159=21156, 4820acf8b4cd4a1e891c24260a13e48f=118996, 6ada1560a1c940c1a969c4d63ac68c0b=12516, 18181b1f504e4aa89b6642002f1ad748=22238, 4da614a82bb64c9fa76627a68a54be7e=148409, ee89f18ac8d14596bb8249ed5799e983=16828, 1130d024e3ce4b198158fd3e8e8e4b13=21168, e5df61f8034c4ab89341ff987b847d9c=180886, c0a2fc1b0522444f96c8533edf061736=21171, 766f3e8989764e07a7113904ea2a1f12=12523, 8d1297aa365b454eba2b36a016f29d89=21171] 2024-11-19T18:32:52,505 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-19T18:32:52,506 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 63180f0cb91b87611df480c0f7ab38fd: Waiting for close lock at 1732041172477Running coprocessor pre-close hooks at 1732041172477Disabling compacts and flushes for region at 1732041172477Disabling writes for close at 1732041172477Writing region close event to WAL at 1732041172502 (+25 ms)Running coprocessor post-close hooks at 1732041172506 (+4 ms)Closed at 1732041172506 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732041129963.63180f0cb91b87611df480c0f7ab38fd. 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 32b10d34e9cd1a30469e17eeed3e0b1e, disabling compactions & flushes 2024-11-19T18:32:52,506 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. after waiting 0 ms 2024-11-19T18:32:52,506 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 
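The "Region close journal" entry just above renders each close step as "step at <millis>" with a "(+N ms)" delta whenever the clock advanced since the previous step. The following standalone sketch reproduces that rendering; it is illustrative only (not HBase's actual journal/MonitoredTask code), and the class and step names are invented.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative only: records named steps with wall-clock millis and renders
 *  them in the "step at <ts> (+delta ms)" style seen in the close journal above. */
public final class CloseJournalSketch {
    private record Step(String name, long atMillis) {}
    private final List<Step> steps = new ArrayList<>();

    public void record(String name) {
        steps.add(new Step(name, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.name()).append(" at ").append(s.atMillis());
            if (prev >= 0 && s.atMillis() > prev) {
                // only print a delta when time actually moved, as in the log
                sb.append(" (+").append(s.atMillis() - prev).append(" ms)");
            }
            prev = s.atMillis();
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        CloseJournalSketch journal = new CloseJournalSketch();
        journal.record("Waiting for close lock");
        journal.record("Disabling writes for close");
        Thread.sleep(25);                       // simulate the 25 ms gap seen in the log
        journal.record("Writing region close event to WAL");
        journal.record("Closed");
        System.out.println(journal);
    }
}
```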
2024-11-19T18:32:52,506 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870->hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/86f3d38b72b610f50be445126a88d870/info/2697ac83ab1d48c1af4df311187a62ee-bottom] to archive 2024-11-19T18:32:52,507 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T18:32:52,508 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870 to hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/archive/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/info/2697ac83ab1d48c1af4df311187a62ee.86f3d38b72b610f50be445126a88d870 2024-11-19T18:32:52,509 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-19T18:32:52,511 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/data/default/TestLogRolling-testLogRolling/32b10d34e9cd1a30469e17eeed3e0b1e/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-19T18:32:52,512 INFO [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:52,512 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 32b10d34e9cd1a30469e17eeed3e0b1e: Waiting for close lock at 1732041172506Running coprocessor pre-close hooks at 1732041172506Disabling compacts and flushes for region at 1732041172506Disabling writes for close at 1732041172506Writing region close event to WAL at 1732041172509 (+3 ms)Running coprocessor post-close hooks at 1732041172512 (+3 ms)Closed at 1732041172512 2024-11-19T18:32:52,512 DEBUG [RS_CLOSE_REGION-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732041129963.32b10d34e9cd1a30469e17eeed3e0b1e. 2024-11-19T18:32:52,678 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,33931,1732041114788; all regions closed. 
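The StoreCloser entries above move each compacted store file from the table's data directory to the same relative location under archive/. Stripped of the HBase and HDFS specifics, that is a simple path rewrite; the sketch below is illustrative only (not the real HFileArchiver logic, and the method name is invented), with the root directory and file name taken from the log lines.

```java
import java.net.URI;

/**
 * Illustrative only: derives an archive location for a store file by replacing the
 * "/data/" segment of its path with "/archive/data/", mirroring the source/destination
 * pairs reported by the HFileArchiver entries above.
 */
public final class ArchivePathSketch {

    /** Rewrites .../data/<ns>/<table>/<region>/<cf>/<file> to .../archive/data/... */
    static URI toArchiveLocation(URI storeFile, String rootDir) {
        String path = storeFile.getPath();
        String dataPrefix = rootDir + "/data/";
        if (!path.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("Not under the data dir: " + path);
        }
        String suffix = path.substring(dataPrefix.length());
        return storeFile.resolve(rootDir + "/archive/data/" + suffix);
    }

    public static void main(String[] args) {
        URI src = URI.create(
            "hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165"
            + "/data/default/TestLogRolling-testLogRolling/63180f0cb91b87611df480c0f7ab38fd"
            + "/info/bf9894b427aa43ecb41a771df353b230");
        String root = "/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165";
        // Prints the same archive path the log entry above reports as the destination.
        System.out.println(toArchiveLocation(src, root));
    }
}
```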
2024-11-19T18:32:52,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,679 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,679 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,679 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741834_1010 (size=8107) 2024-11-19T18:32:52,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741834_1010 (size=8107) 2024-11-19T18:32:52,683 DEBUG [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs 2024-11-19T18:32:52,683 INFO [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C33931%2C1732041114788.meta:.meta(num 1732041115556) 2024-11-19T18:32:52,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,684 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,684 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,684 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741877_1053 (size=780) 2024-11-19T18:32:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741877_1053 (size=780) 2024-11-19T18:32:52,687 DEBUG [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/oldWALs 2024-11-19T18:32:52,687 INFO [RS:0;30db5f576be8:33931 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C33931%2C1732041114788:(num 1732041172461) 2024-11-19T18:32:52,687 DEBUG [RS:0;30db5f576be8:33931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:52,687 INFO [RS:0;30db5f576be8:33931 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:32:52,687 INFO [RS:0;30db5f576be8:33931 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:32:52,687 INFO [RS:0;30db5f576be8:33931 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T18:32:52,688 INFO [RS:0;30db5f576be8:33931 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:32:52,688 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T18:32:52,688 INFO [RS:0;30db5f576be8:33931 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33931 2024-11-19T18:32:52,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,33931,1732041114788 2024-11-19T18:32:52,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:32:52,689 INFO [RS:0;30db5f576be8:33931 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:32:52,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,33931,1732041114788] 2024-11-19T18:32:52,693 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,33931,1732041114788 already deleted, retry=false 2024-11-19T18:32:52,693 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,33931,1732041114788 expired; onlineServers=0 2024-11-19T18:32:52,693 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,44151,1732041114739' ***** 2024-11-19T18:32:52,693 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:32:52,693 INFO [M:0;30db5f576be8:44151 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:32:52,693 INFO [M:0;30db5f576be8:44151 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:32:52,693 DEBUG [M:0;30db5f576be8:44151 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:32:52,693 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
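The ZooKeeper events above show the region server's ephemeral znode under /hbase/rs disappearing and the master's RegionServerTracker treating the resulting children change as server expiration. As a rough illustration of that mechanism using the plain ZooKeeper client (this is not HBase's ZKWatcher code; the connect string, paths and node names are placeholders):

```java
import org.apache.zookeeper.*;
import org.apache.zookeeper.ZooDefs.Ids;
import java.util.List;
import java.util.concurrent.CountDownLatch;

/** Illustrative only: one session registers an ephemeral member znode, another
 *  session watches the parent and reacts when the member vanishes, which is the
 *  general pattern behind the /hbase/rs events in the log above. */
public class EphemeralTrackerSketch {

    private static ZooKeeper connect(String hosts) throws Exception {
        CountDownLatch ready = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(hosts, 30_000, e -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) ready.countDown();
        });
        ready.await();
        return zk;
    }

    private static void ensure(ZooKeeper zk, String path) throws Exception {
        if (zk.exists(path, false) == null) {
            zk.create(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
    }

    public static void main(String[] args) throws Exception {
        String hosts = "127.0.0.1:2181";          // placeholder quorum
        ZooKeeper tracker = connect(hosts);        // plays the "master" role
        ZooKeeper server = connect(hosts);         // plays the "region server" role

        ensure(tracker, "/demo");
        ensure(tracker, "/demo/rs");

        // The server registers itself; the node is deleted automatically when its session ends.
        server.create("/demo/rs/server-1", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        CountDownLatch expired = new CountDownLatch(1);
        // The tracker watches the children; a change after a member vanishes is
        // treated as that server expiring.
        List<String> members = tracker.getChildren("/demo/rs", e -> {
            if (e.getType() == Watcher.Event.EventType.NodeChildrenChanged) expired.countDown();
        });
        System.out.println("current members: " + members);

        server.close();   // session ends -> ephemeral node removed -> watch fires
        expired.await();
        System.out.println("member gone, processing expiration");
        tracker.close();
    }
}
```

Because an ephemeral node disappears whenever its owning session ends, a crash and an orderly shutdown look the same to the watcher, which is consistent with the master processing this clean stop as an expiration.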
2024-11-19T18:32:52,693 DEBUG [M:0;30db5f576be8:44151 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:32:52,693 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041114953 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041114953,5,FailOnTimeoutGroup] 2024-11-19T18:32:52,693 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041114953 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041114953,5,FailOnTimeoutGroup] 2024-11-19T18:32:52,694 INFO [M:0;30db5f576be8:44151 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:32:52,694 INFO [M:0;30db5f576be8:44151 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:32:52,694 DEBUG [M:0;30db5f576be8:44151 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:32:52,694 INFO [M:0;30db5f576be8:44151 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:32:52,694 INFO [M:0;30db5f576be8:44151 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:32:52,694 INFO [M:0;30db5f576be8:44151 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:32:52,694 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:32:52,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:32:52,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:52,695 DEBUG [M:0;30db5f576be8:44151 {}] zookeeper.ZKUtil(347): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:32:52,695 WARN [M:0;30db5f576be8:44151 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:32:52,695 INFO [M:0;30db5f576be8:44151 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/.lastflushedseqids 2024-11-19T18:32:52,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741878_1054 (size=228) 2024-11-19T18:32:52,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741878_1054 (size=228) 2024-11-19T18:32:52,700 INFO [M:0;30db5f576be8:44151 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:32:52,700 INFO [M:0;30db5f576be8:44151 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:32:52,700 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:32:52,701 INFO [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:52,701 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:52,701 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:32:52,701 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:52,701 INFO [M:0;30db5f576be8:44151 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-19T18:32:52,717 DEBUG [M:0;30db5f576be8:44151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14613cd19f7942bda2eb6e8b4be54bea is 82, key is hbase:meta,,1/info:regioninfo/1732041115579/Put/seqid=0 2024-11-19T18:32:52,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-19T18:32:52,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T18:32:52,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741879_1055 (size=5672) 2024-11-19T18:32:52,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741879_1055 (size=5672) 2024-11-19T18:32:52,722 INFO [M:0;30db5f576be8:44151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14613cd19f7942bda2eb6e8b4be54bea 2024-11-19T18:32:52,747 DEBUG [M:0;30db5f576be8:44151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/facf9ce399684fd0ab0abde5044d9400 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732041116083/Put/seqid=0 2024-11-19T18:32:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741880_1056 (size=7090) 2024-11-19T18:32:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741880_1056 (size=7090) 2024-11-19T18:32:52,752 INFO [M:0;30db5f576be8:44151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/facf9ce399684fd0ab0abde5044d9400 2024-11-19T18:32:52,756 INFO [M:0;30db5f576be8:44151 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for facf9ce399684fd0ab0abde5044d9400 2024-11-19T18:32:52,770 DEBUG [M:0;30db5f576be8:44151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ad82fa1b4ae475981629988076e5608 is 69, key is 30db5f576be8,33931,1732041114788/rs:state/1732041115022/Put/seqid=0 2024-11-19T18:32:52,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741881_1057 (size=5156) 2024-11-19T18:32:52,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741881_1057 (size=5156) 2024-11-19T18:32:52,775 INFO [M:0;30db5f576be8:44151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ad82fa1b4ae475981629988076e5608 2024-11-19T18:32:52,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:52,791 INFO [RS:0;30db5f576be8:33931 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:32:52,791 INFO [RS:0;30db5f576be8:33931 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=30db5f576be8,33931,1732041114788; zookeeper connection closed. 2024-11-19T18:32:52,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33931-0x101317feef50001, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:52,791 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d06152 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d06152 2024-11-19T18:32:52,792 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:32:52,793 DEBUG [M:0;30db5f576be8:44151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85f2e2490d05464db1eeb44e47e045dd is 52, key is load_balancer_on/state:d/1732041115712/Put/seqid=0 2024-11-19T18:32:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741882_1058 (size=5056) 2024-11-19T18:32:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741882_1058 (size=5056) 2024-11-19T18:32:52,798 INFO [M:0;30db5f576be8:44151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85f2e2490d05464db1eeb44e47e045dd 2024-11-19T18:32:52,802 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14613cd19f7942bda2eb6e8b4be54bea as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14613cd19f7942bda2eb6e8b4be54bea 2024-11-19T18:32:52,806 INFO [M:0;30db5f576be8:44151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14613cd19f7942bda2eb6e8b4be54bea, entries=8, sequenceid=125, filesize=5.5 K 2024-11-19T18:32:52,807 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/facf9ce399684fd0ab0abde5044d9400 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/facf9ce399684fd0ab0abde5044d9400 2024-11-19T18:32:52,811 INFO [M:0;30db5f576be8:44151 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for facf9ce399684fd0ab0abde5044d9400 2024-11-19T18:32:52,811 INFO [M:0;30db5f576be8:44151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/facf9ce399684fd0ab0abde5044d9400, entries=13, sequenceid=125, filesize=6.9 K 
2024-11-19T18:32:52,812 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ad82fa1b4ae475981629988076e5608 as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ad82fa1b4ae475981629988076e5608 2024-11-19T18:32:52,816 INFO [M:0;30db5f576be8:44151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ad82fa1b4ae475981629988076e5608, entries=1, sequenceid=125, filesize=5.0 K 2024-11-19T18:32:52,817 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85f2e2490d05464db1eeb44e47e045dd as hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/85f2e2490d05464db1eeb44e47e045dd 2024-11-19T18:32:52,820 INFO [M:0;30db5f576be8:44151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33131/user/jenkins/test-data/0e5dc034-28f8-416a-9dd0-4b77ff7b3165/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/85f2e2490d05464db1eeb44e47e045dd, entries=1, sequenceid=125, filesize=4.9 K 2024-11-19T18:32:52,821 INFO [M:0;30db5f576be8:44151 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=125, compaction requested=false 2024-11-19T18:32:52,823 INFO [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:52,823 DEBUG [M:0;30db5f576be8:44151 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041172700Disabling compacts and flushes for region at 1732041172700Disabling writes for close at 1732041172701 (+1 ms)Obtaining lock to block concurrent updates at 1732041172701Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732041172701Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732041172701Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732041172702 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732041172702Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732041172716 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732041172716Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732041172726 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732041172746 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732041172746Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732041172756 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732041172770 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732041172770Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732041172779 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732041172792 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732041172792Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52ef93f6: reopening flushed file at 1732041172802 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46fc708b: reopening flushed file at 1732041172806 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6beb805e: reopening flushed file at 1732041172811 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2af6fc2a: reopening flushed file at 1732041172816 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=125, compaction requested=false at 1732041172821 (+5 ms)Writing region close event to WAL at 1732041172823 (+2 ms)Closed at 1732041172823 2024-11-19T18:32:52,823 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,824 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,824 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,824 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,824 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:52,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35377 is added to blk_1073741830_1006 (size=61320) 2024-11-19T18:32:52,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42347 is added to blk_1073741830_1006 (size=61320) 2024-11-19T18:32:52,826 INFO [M:0;30db5f576be8:44151 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T18:32:52,826 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
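The flush above writes each column family's snapshot to a file under the store's .tmp directory and then "commits" it by moving it into the family directory (the HRegionFileSystem(442) entries). Stripped of everything HBase- and HDFS-specific, that is the usual write-to-temp-then-rename commit; below is a minimal local-filesystem sketch with java.nio.file, where the directory layout and file contents are made up for the example.

```java
import java.io.IOException;
import java.nio.file.*;

/** Illustrative only: write a new file under .tmp, then atomically move it into its
 *  final directory, mirroring the .tmp -> family-directory commits logged above
 *  (HBase performs the equivalent renames on HDFS). */
public final class TmpCommitSketch {

    static Path flush(Path storeDir, String fileName, byte[] contents) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents);                 // 1. write the flushed data under .tmp

        Path finalFile = storeDir.resolve("info").resolve(fileName);
        Files.createDirectories(finalFile.getParent());
        // 2. commit: the move is atomic on the same filesystem, so readers never
        //    observe a partially written file.
        return Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store-1595e783");
        Path committed = flush(store, "14613cd19f7942bda2eb6e8b4be54bea", "demo".getBytes());
        System.out.println("Committed " + committed);
    }
}
```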
2024-11-19T18:32:52,826 INFO [M:0;30db5f576be8:44151 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44151 2024-11-19T18:32:52,827 INFO [M:0;30db5f576be8:44151 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:32:52,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:52,929 INFO [M:0;30db5f576be8:44151 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:32:52,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44151-0x101317feef50000, quorum=127.0.0.1:63904, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:52,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41a74ab6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:52,932 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d7cc900{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:52,932 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:52,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51585bde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:52,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9612b29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:52,934 WARN [BP-21126910-172.17.0.2-1732041114065 heartbeating to localhost/127.0.0.1:33131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:32:52,934 WARN [BP-21126910-172.17.0.2-1732041114065 heartbeating to localhost/127.0.0.1:33131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-21126910-172.17.0.2-1732041114065 (Datanode Uuid 0a560c69-43c3-4ced-913d-4f23635c8c31) service to localhost/127.0.0.1:33131 2024-11-19T18:32:52,934 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:32:52,935 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:32:52,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data3/current/BP-21126910-172.17.0.2-1732041114065 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:52,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data4/current/BP-21126910-172.17.0.2-1732041114065 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:52,935 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:32:52,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ecf816b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:52,938 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5411f427{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:52,938 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:52,938 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@459363d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:52,938 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45890504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:52,940 WARN [BP-21126910-172.17.0.2-1732041114065 heartbeating to localhost/127.0.0.1:33131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:32:52,940 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:32:52,940 WARN [BP-21126910-172.17.0.2-1732041114065 heartbeating to localhost/127.0.0.1:33131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-21126910-172.17.0.2-1732041114065 (Datanode Uuid 889d51b1-b22e-4e1e-8174-e18adb36ee9c) service to localhost/127.0.0.1:33131 2024-11-19T18:32:52,940 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:32:52,940 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data1/current/BP-21126910-172.17.0.2-1732041114065 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:52,940 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/cluster_1e25a420-ec12-1f0e-d497-8af2c891c17a/data/data2/current/BP-21126910-172.17.0.2-1732041114065 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:52,941 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:32:52,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63d9f0e2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:32:52,947 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38dc0fd7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:52,947 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:52,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78d2a49d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:52,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59c539e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:52,955 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:32:52,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:32:52,990 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 205) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:33131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection 
to localhost/127.0.0.1:33131 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=21 (was 59), ProcessCount=11 (was 11), AvailableMemoryMB=6512 (was 6751) 2024-11-19T18:32:52,998 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=21, ProcessCount=11, AvailableMemoryMB=6512 2024-11-19T18:32:52,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T18:32:52,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.log.dir so I do NOT create it in target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d6125470-dfaa-dce9-98c4-6f738a67b4e9/hadoop.tmp.dir so I do NOT create it in target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b, deleteOnExit=true 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/test.cache.data in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T18:32:52,999 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T18:32:52,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/nfs.dump.dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/java.io.tmpdir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T18:32:53,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T18:32:53,013 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:32:53,038 INFO [regionserver/30db5f576be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:32:53,072 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:32:53,076 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:32:53,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:32:53,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:32:53,077 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:32:53,078 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:32:53,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134d2ab8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:32:53,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a96c77e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:32:53,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72d15c59{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/java.io.tmpdir/jetty-localhost-38485-hadoop-hdfs-3_4_1-tests_jar-_-any-16318946106865687208/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:32:53,192 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55be9e2c{HTTP/1.1, (http/1.1)}{localhost:38485} 2024-11-19T18:32:53,192 INFO [Time-limited test {}] server.Server(415): Started @296575ms 2024-11-19T18:32:53,205 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T18:32:53,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:32:53,260 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:32:53,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:32:53,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:32:53,261 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T18:32:53,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@150dab73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:32:53,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47f122ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:32:53,377 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41f14207{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/java.io.tmpdir/jetty-localhost-41375-hadoop-hdfs-3_4_1-tests_jar-_-any-4993282645528127225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:53,377 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a2c3a40{HTTP/1.1, (http/1.1)}{localhost:41375} 2024-11-19T18:32:53,377 INFO [Time-limited test {}] server.Server(415): Started @296759ms 2024-11-19T18:32:53,378 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T18:32:53,407 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T18:32:53,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T18:32:53,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T18:32:53,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T18:32:53,410 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T18:32:53,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380ffe40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,AVAILABLE} 2024-11-19T18:32:53,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35c58925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T18:32:53,475 WARN [Thread-2469 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data2/current/BP-1663235635-172.17.0.2-1732041173019/current, will proceed with Du for space computation calculation, 2024-11-19T18:32:53,475 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data1/current/BP-1663235635-172.17.0.2-1732041173019/current, will proceed with Du for space computation calculation, 2024-11-19T18:32:53,496 WARN [Thread-2447 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T18:32:53,498 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d2ea0f9d1cc96ea with lease ID 0x92ef74866ee743f5: Processing first storage report for DS-13169735-cd2f-4ecb-b7a7-1c3ab8947c73 from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=c930c368-be52-418f-9748-4ee4c706a9e2, infoPort=42265, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019) 2024-11-19T18:32:53,498 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d2ea0f9d1cc96ea with lease ID 0x92ef74866ee743f5: from storage DS-13169735-cd2f-4ecb-b7a7-1c3ab8947c73 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=c930c368-be52-418f-9748-4ee4c706a9e2, infoPort=42265, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:32:53,498 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d2ea0f9d1cc96ea with lease ID 0x92ef74866ee743f5: Processing first storage report for DS-722201d3-8453-4922-8ccf-bb3a937d1e3f from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=c930c368-be52-418f-9748-4ee4c706a9e2, infoPort=42265, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019) 2024-11-19T18:32:53,498 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d2ea0f9d1cc96ea with lease ID 0x92ef74866ee743f5: from storage DS-722201d3-8453-4922-8ccf-bb3a937d1e3f node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=c930c368-be52-418f-9748-4ee4c706a9e2, infoPort=42265, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:32:53,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32c86c86{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/java.io.tmpdir/jetty-localhost-37973-hadoop-hdfs-3_4_1-tests_jar-_-any-9453776328240425052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:53,527 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a7b167c{HTTP/1.1, (http/1.1)}{localhost:37973} 2024-11-19T18:32:53,527 INFO [Time-limited test {}] server.Server(415): Started @296909ms 2024-11-19T18:32:53,528 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
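The records above show the test harness bringing up an HDFS minicluster for the next test case: two datanodes register with the namenode, their Jetty web apps start, and the first block reports are processed. For reference, a minimal sketch of standing up a comparable two-datanode cluster directly with Hadoop's MiniDFSCluster follows; it assumes the hadoop-hdfs test artifact is on the classpath, and the base directory and paths are illustrative rather than taken from this run.

// Sketch only: a two-datanode HDFS minicluster, roughly what HBaseTestingUtil
// starts before the HBase minicluster itself. Assumes MiniDFSCluster (from the
// hadoop-hdfs tests jar) is available; the base directory is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Keep the cluster's data under an explicit directory (illustrative path).
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // matches numDataNodes=2 in the startup options above
        .build();
    try {
      cluster.waitActive();                     // wait until both datanodes have registered
      FileSystem fs = cluster.getFileSystem();  // client for the minicluster's namespace
      fs.mkdirs(new Path("/user/jenkins/test-data"));
      System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
    } finally {
      cluster.shutdown();                       // tears down datanodes and namenode
    }
  }
}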
2024-11-19T18:32:53,640 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data3/current/BP-1663235635-172.17.0.2-1732041173019/current, will proceed with Du for space computation calculation, 2024-11-19T18:32:53,640 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data4/current/BP-1663235635-172.17.0.2-1732041173019/current, will proceed with Du for space computation calculation, 2024-11-19T18:32:53,657 WARN [Thread-2483 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T18:32:53,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe085f7111581ad65 with lease ID 0x92ef74866ee743f6: Processing first storage report for DS-b2e748ab-f6ab-48c7-9d09-1b49422cbb00 from datanode DatanodeRegistration(127.0.0.1:40547, datanodeUuid=5125daaa-f034-42a9-ba64-0836715e6119, infoPort=33147, infoSecurePort=0, ipcPort=43643, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019) 2024-11-19T18:32:53,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe085f7111581ad65 with lease ID 0x92ef74866ee743f6: from storage DS-b2e748ab-f6ab-48c7-9d09-1b49422cbb00 node DatanodeRegistration(127.0.0.1:40547, datanodeUuid=5125daaa-f034-42a9-ba64-0836715e6119, infoPort=33147, infoSecurePort=0, ipcPort=43643, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:32:53,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe085f7111581ad65 with lease ID 0x92ef74866ee743f6: Processing first storage report for DS-1af05fd4-4f8c-4be2-8190-1e6aa5e4e4d4 from datanode DatanodeRegistration(127.0.0.1:40547, datanodeUuid=5125daaa-f034-42a9-ba64-0836715e6119, infoPort=33147, infoSecurePort=0, ipcPort=43643, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019) 2024-11-19T18:32:53,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe085f7111581ad65 with lease ID 0x92ef74866ee743f6: from storage DS-1af05fd4-4f8c-4be2-8190-1e6aa5e4e4d4 node DatanodeRegistration(127.0.0.1:40547, datanodeUuid=5125daaa-f034-42a9-ba64-0836715e6119, infoPort=33147, infoSecurePort=0, ipcPort=43643, storageInfo=lv=-57;cid=testClusterID;nsid=2071760732;c=1732041173019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T18:32:53,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:53,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:53,750 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9 2024-11-19T18:32:53,753 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/zookeeper_0, clientPort=58657, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T18:32:53,754 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58657 2024-11-19T18:32:53,754 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:32:53,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741825_1001 (size=7) 2024-11-19T18:32:53,764 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38 with version=8 2024-11-19T18:32:53,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34103/user/jenkins/test-data/dfba4545-7c18-3f75-7695-d7f0005e8198/hbase-staging 2024-11-19T18:32:53,766 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T18:32:53,766 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:32:53,767 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41951 2024-11-19T18:32:53,768 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41951 connecting to ZooKeeper ensemble=127.0.0.1:58657 2024-11-19T18:32:53,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419510x0, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:32:53,775 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41951-0x1013180d5880000 connected 2024-11-19T18:32:53,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:53,793 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38, hbase.cluster.distributed=false 2024-11-19T18:32:53,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:32:53,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-19T18:32:53,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41951 2024-11-19T18:32:53,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41951 2024-11-19T18:32:53,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-19T18:32:53,797 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-19T18:32:53,812 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30db5f576be8:0 server-side Connection retries=45 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T18:32:53,812 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T18:32:53,813 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43213 2024-11-19T18:32:53,814 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43213 connecting to ZooKeeper ensemble=127.0.0.1:58657 2024-11-19T18:32:53,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432130x0, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T18:32:53,820 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432130x0, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:53,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43213-0x1013180d5880001 connected 2024-11-19T18:32:53,820 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T18:32:53,821 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T18:32:53,821 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
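Several of the ZKUtil records around this point set watchers on znodes that do not yet exist (/hbase/running, /hbase/acl, /hbase/master); that existence-watch pattern is how the master and regionserver learn about each other's state changes. A minimal sketch of the same pattern with the plain ZooKeeper client is shown below; the ensemble address mirrors the log, while the session timeout and the use of the raw client instead of HBase's RecoverableZooKeeper are illustrative assumptions.

// Sketch only: register an existence watch on a znode that may not exist yet.
// exists() returns null for an absent znode but still installs the watcher, so
// the caller is notified once /hbase/master is created.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ExistenceWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58657", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    Stat stat = zk.exists("/hbase/master",
        (WatchedEvent event) -> System.out.println("event: " + event.getType() + " on " + event.getPath()));
    System.out.println("/hbase/master " + (stat == null ? "does not exist yet" : "exists"));

    Thread.sleep(60_000);  // keep the session alive long enough to observe the creation event
    zk.close();
  }
}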
2024-11-19T18:32:53,822 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T18:32:53,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43213 2024-11-19T18:32:53,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43213 2024-11-19T18:32:53,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43213 2024-11-19T18:32:53,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43213 2024-11-19T18:32:53,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43213 2024-11-19T18:32:53,835 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30db5f576be8:41951 2024-11-19T18:32:53,835 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:32:53,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:32:53,837 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T18:32:53,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,838 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T18:32:53,839 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30db5f576be8,41951,1732041173766 from backup master directory 2024-11-19T18:32:53,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:32:53,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T18:32:53,842 WARN [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:32:53,842 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,845 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/hbase.id] with ID: fd842d31-08e8-42c4-9297-b9ea40f9aabf 2024-11-19T18:32:53,845 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/.tmp/hbase.id 2024-11-19T18:32:53,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:32:53,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741826_1002 (size=42) 2024-11-19T18:32:53,851 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/.tmp/hbase.id]:[hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/hbase.id] 2024-11-19T18:32:53,860 INFO [master/30db5f576be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:53,860 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T18:32:53,861 INFO [master/30db5f576be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
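The FSUtils records above create the cluster ID file by first writing hbase.id under a .tmp directory and then moving it to its final location, so readers never observe a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the namenode address and directory layout are copied from the log for context, while the UUID payload and reduced error handling are illustrative assumptions.

// Sketch only: publish a small file on HDFS via write-to-temp + rename, as in
// the hbase.id records above. Paths mirror the log; everything else is illustrative.
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46723");   // namenode from the log above
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38");
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // ...then publish it with a rename, which HDFS performs atomically at the namespace level.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename " + tmpId + " -> " + finalId + " failed");
    }
    System.out.println("cluster id file at " + finalId);
  }
}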
2024-11-19T18:32:53,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:32:53,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741827_1003 (size=196) 2024-11-19T18:32:53,869 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T18:32:53,870 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T18:32:53,870 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:32:53,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:32:53,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741828_1004 (size=1189) 2024-11-19T18:32:53,876 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store 2024-11-19T18:32:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:32:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741829_1005 (size=34) 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:32:53,882 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:53,882 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
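The descriptor logged above for 'master:store' carries four column families (info, proc, rs, state) with per-family versions, bloom filters, block sizes, and encodings. A minimal sketch of assembling a comparable descriptor through the public HBase client API follows; the attribute values are copied from the log, but the builder chain itself is an approximation for illustration, not the code MasterRegion actually runs.

// Sketch only: a table descriptor with the four families shown above for
// 'master:store'. Attribute values come from the log; the builder usage is an
// approximation of what the master assembles internally.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {

  static ColumnFamilyDescriptor family(String name, int versions, BloomType bloom,
      DataBlockEncoding encoding, boolean inMemory, int blockSize) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(versions)
        .setBloomFilterType(bloom)
        .setDataBlockEncoding(encoding)
        .setInMemory(inMemory)
        .setBlocksize(blockSize)
        .build();
  }

  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        // 'info': 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks
        .setColumnFamily(family("info", 3, BloomType.ROWCOL, DataBlockEncoding.ROW_INDEX_V1, true, 8 * 1024))
        // 'proc', 'rs', 'state': 1 version, ROW bloom, no encoding, 64 KB blocks
        .setColumnFamily(family("proc", 1, BloomType.ROW, DataBlockEncoding.NONE, false, 64 * 1024))
        .setColumnFamily(family("rs", 1, BloomType.ROW, DataBlockEncoding.NONE, false, 64 * 1024))
        .setColumnFamily(family("state", 1, BloomType.ROW, DataBlockEncoding.NONE, false, 64 * 1024))
        .build();
    System.out.println(td);
  }
}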
2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041173882Disabling compacts and flushes for region at 1732041173882Disabling writes for close at 1732041173882Writing region close event to WAL at 1732041173882Closed at 1732041173882 2024-11-19T18:32:53,882 WARN [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/.initializing 2024-11-19T18:32:53,882 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/WALs/30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,884 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C41951%2C1732041173766, suffix=, logDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/WALs/30db5f576be8,41951,1732041173766, archiveDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/oldWALs, maxLogs=10 2024-11-19T18:32:53,885 INFO [master/30db5f576be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C41951%2C1732041173766.1732041173885 2024-11-19T18:32:53,888 INFO [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/WALs/30db5f576be8,41951,1732041173766/30db5f576be8%2C41951%2C1732041173766.1732041173885 2024-11-19T18:32:53,889 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33147:33147),(127.0.0.1/127.0.0.1:42265:42265)] 2024-11-19T18:32:53,890 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:32:53,890 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:53,890 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,890 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T18:32:53,892 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:53,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T18:32:53,894 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:32:53,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T18:32:53,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:32:53,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T18:32:53,896 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T18:32:53,897 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,898 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,898 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,899 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,899 DEBUG [master/30db5f576be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,899 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T18:32:53,900 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T18:32:53,902 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:32:53,902 INFO [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848586, jitterRate=0.07903306186199188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T18:32:53,903 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732041173890Initializing all the Stores at 1732041173891 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041173891Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041173891Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041173891Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041173891Cleaning up temporary data from old regions at 1732041173899 (+8 ms)Region opened successfully at 1732041173903 (+4 ms) 2024-11-19T18:32:53,903 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T18:32:53,905 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31c2f099, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:32:53,906 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T18:32:53,906 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T18:32:53,906 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T18:32:53,906 INFO [master/30db5f576be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T18:32:53,907 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T18:32:53,907 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T18:32:53,907 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T18:32:53,909 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T18:32:53,910 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T18:32:53,911 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T18:32:53,911 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T18:32:53,912 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T18:32:53,913 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T18:32:53,913 INFO [master/30db5f576be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T18:32:53,914 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T18:32:53,916 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T18:32:53,916 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T18:32:53,917 DEBUG 
[master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T18:32:53,919 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T18:32:53,920 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T18:32:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,922 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30db5f576be8,41951,1732041173766, sessionid=0x1013180d5880000, setting cluster-up flag (Was=false) 2024-11-19T18:32:53,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,932 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T18:32:53,933 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:53,941 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T18:32:53,942 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30db5f576be8,41951,1732041173766 2024-11-19T18:32:53,943 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T18:32:53,945 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T18:32:53,945 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T18:32:53,945 INFO [master/30db5f576be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T18:32:53,945 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30db5f576be8,41951,1732041173766 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30db5f576be8:0, corePoolSize=5, maxPoolSize=5 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30db5f576be8:0, corePoolSize=10, maxPoolSize=10 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:32:53,946 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30db5f576be8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732041203947 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T18:32:53,947 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:53,948 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:32:53,948 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T18:32:53,948 INFO [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T18:32:53,949 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041173949,5,FailOnTimeoutGroup] 2024-11-19T18:32:53,949 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,949 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T18:32:53,951 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041173949,5,FailOnTimeoutGroup] 2024-11-19T18:32:53,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:53,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T18:32:53,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:53,951 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
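PEWorker-1 above writes a fresh hbase:meta table descriptor (families info, ns, rep_barrier and table, plus the MultiRowMutationEndpoint coprocessor). Against a running cluster, the same descriptor can be read back through the Admin API; a minimal sketch, assuming an hbase-site.xml for this cluster is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class DumpMetaDescriptor {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml / ZK quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      // toString() should show essentially the attribute list the
      // FSTableDescriptors line logs: IS_META, the coprocessor entry, and the
      // info/ns/rep_barrier/table families.
      System.out.println(meta);
    }
  }
}
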
2024-11-19T18:32:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:32:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741831_1007 (size=1321) 2024-11-19T18:32:53,956 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T18:32:53,956 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38 2024-11-19T18:32:53,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:32:53,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741832_1008 (size=32) 2024-11-19T18:32:53,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:53,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:32:53,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:32:53,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:53,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:32:53,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:32:53,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:53,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:32:53,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:32:53,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:53,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:32:53,967 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:32:53,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:53,968 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:53,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:32:53,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740 2024-11-19T18:32:53,969 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740 2024-11-19T18:32:53,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:32:53,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:32:53,970 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
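The FlushLargeStoresPolicy lines (here for hbase:meta, earlier for master:store) fall back to the region memstore flush size divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A small sketch of that arithmetic; the flush sizes below are inferred from the logged per-family figures, not read from configuration:

public final class FlushLowerBoundSketch {
  // per-family lower bound = region memstore flush size / number of families
  static long perFamilyLowerBound(long memstoreFlushSizeBytes, int familyCount) {
    return memstoreFlushSizeBytes / familyCount;
  }

  public static void main(String[] args) {
    // hbase:meta has 4 families (info, ns, rep_barrier, table); a 64 MiB flush
    // size would give the logged 16.0 M (flushSizeLowerBound=16777216).
    System.out.println(perFamilyLowerBound(64L * 1024 * 1024, 4));
    // master:store has 4 families (info, proc, rs, state); a 128 MiB flush
    // size gives the logged 32.0 M (flushSizeLowerBound=33554432).
    System.out.println(perFamilyLowerBound(128L * 1024 * 1024, 4));
  }
}
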
2024-11-19T18:32:53,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:32:53,973 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T18:32:53,973 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737392, jitterRate=-0.062358155846595764}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732041173962Initializing all the Stores at 1732041173962Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041173962Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041173962Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041173962Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041173962Cleaning up temporary data from old regions at 1732041173970 (+8 ms)Region opened successfully at 1732041173974 (+4 ms) 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:32:53,974 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:32:53,974 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:32:53,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041173974Disabling compacts and flushes for region at 1732041173974Disabling writes for close at 1732041173974Writing region close event 
to WAL at 1732041173974Closed at 1732041173974 2024-11-19T18:32:53,975 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:32:53,975 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T18:32:53,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T18:32:53,976 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:32:53,977 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T18:32:54,025 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(746): ClusterId : fd842d31-08e8-42c4-9297-b9ea40f9aabf 2024-11-19T18:32:54,025 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T18:32:54,027 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T18:32:54,027 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T18:32:54,030 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T18:32:54,030 DEBUG [RS:0;30db5f576be8:43213 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50e5e340, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30db5f576be8/172.17.0.2:0 2024-11-19T18:32:54,042 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30db5f576be8:43213 2024-11-19T18:32:54,042 INFO [RS:0;30db5f576be8:43213 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T18:32:54,042 INFO [RS:0;30db5f576be8:43213 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T18:32:54,042 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T18:32:54,043 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(2659): reportForDuty to master=30db5f576be8,41951,1732041173766 with port=43213, startcode=1732041173812 2024-11-19T18:32:54,043 DEBUG [RS:0;30db5f576be8:43213 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T18:32:54,045 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37111, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T18:32:54,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41951 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41951 {}] master.ServerManager(517): Registering regionserver=30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,047 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38 2024-11-19T18:32:54,047 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46723 2024-11-19T18:32:54,047 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T18:32:54,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:32:54,050 DEBUG [RS:0;30db5f576be8:43213 {}] zookeeper.ZKUtil(111): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,050 WARN [RS:0;30db5f576be8:43213 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T18:32:54,050 INFO [RS:0;30db5f576be8:43213 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:32:54,051 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,051 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30db5f576be8,43213,1732041173812] 2024-11-19T18:32:54,053 INFO [RS:0;30db5f576be8:43213 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T18:32:54,056 INFO [RS:0;30db5f576be8:43213 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T18:32:54,056 INFO [RS:0;30db5f576be8:43213 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T18:32:54,056 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
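The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. With the stock defaults (hbase.regionserver.global.memstore.size = 0.4 of the heap, hbase.regionserver.global.memstore.size.lower.limit = 0.95 of that limit), those figures are consistent with a roughly 2.2 GB test heap; the heap value in the sketch is an inference, not something the log states.

public final class MemStoreLimitSketch {
  public static void main(String[] args) {
    double heapMb = 2200.0;            // assumed JVM heap for the test process
    double upperMb = heapMb * 0.40;    // hbase.regionserver.global.memstore.size (default 0.4) -> ~880 MB
    double lowMarkMb = upperMb * 0.95; // ...global.memstore.size.lower.limit (default 0.95) -> ~836 MB
    System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", upperMb, lowMarkMb);
  }
}
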
2024-11-19T18:32:54,057 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T18:32:54,057 INFO [RS:0;30db5f576be8:43213 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T18:32:54,057 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30db5f576be8:0, corePoolSize=2, maxPoolSize=2 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30db5f576be8:0, corePoolSize=1, maxPoolSize=1 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:32:54,058 DEBUG [RS:0;30db5f576be8:43213 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30db5f576be8:0, corePoolSize=3, maxPoolSize=3 2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
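Each "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." line above is a dedicated bounded pool per event type. Purely as a JDK-level sketch of that shape (this is not HBase's own ExecutorService class):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class BoundedPoolSketch {
  static ThreadPoolExecutor pool(String name, int core, int max) {
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
        core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + System.nanoTime()));
    tpe.allowCoreThreadTimeOut(true); // let idle threads exit in this sketch
    return tpe;
  }

  public static void main(String[] args) {
    // e.g. RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1 as in the log
    ThreadPoolExecutor openRegion = pool("RS_OPEN_REGION", 1, 1);
    openRegion.execute(() -> System.out.println("open-region task"));
    openRegion.shutdown();
  }
}
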
2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,058 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,43213,1732041173812-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:32:54,073 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T18:32:54,073 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,43213,1732041173812-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,073 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,073 INFO [RS:0;30db5f576be8:43213 {}] regionserver.Replication(171): 30db5f576be8,43213,1732041173812 started 2024-11-19T18:32:54,087 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,087 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1482): Serving as 30db5f576be8,43213,1732041173812, RpcServer on 30db5f576be8/172.17.0.2:43213, sessionid=0x1013180d5880001 2024-11-19T18:32:54,087 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T18:32:54,087 DEBUG [RS:0;30db5f576be8:43213 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,087 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,43213,1732041173812' 2024-11-19T18:32:54,087 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T18:32:54,087 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30db5f576be8,43213,1732041173812' 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T18:32:54,088 DEBUG 
[RS:0;30db5f576be8:43213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T18:32:54,088 DEBUG [RS:0;30db5f576be8:43213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T18:32:54,088 INFO [RS:0;30db5f576be8:43213 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T18:32:54,088 INFO [RS:0;30db5f576be8:43213 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T18:32:54,127 WARN [30db5f576be8:41951 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-19T18:32:54,190 INFO [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C43213%2C1732041173812, suffix=, logDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/30db5f576be8,43213,1732041173812, archiveDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs, maxLogs=32 2024-11-19T18:32:54,191 INFO [RS:0;30db5f576be8:43213 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C43213%2C1732041173812.1732041174190 2024-11-19T18:32:54,196 INFO [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/30db5f576be8,43213,1732041173812/30db5f576be8%2C43213%2C1732041173812.1732041174190 2024-11-19T18:32:54,197 DEBUG [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42265:42265),(127.0.0.1/127.0.0.1:33147:33147)] 2024-11-19T18:32:54,378 DEBUG [30db5f576be8:41951 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T18:32:54,378 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,379 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,43213,1732041173812, state=OPENING 2024-11-19T18:32:54,381 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T18:32:54,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:54,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:54,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:32:54,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:32:54,383 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T18:32:54,383 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,43213,1732041173812}] 2024-11-19T18:32:54,535 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T18:32:54,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42305, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T18:32:54,540 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T18:32:54,540 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:32:54,542 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30db5f576be8%2C43213%2C1732041173812.meta, suffix=.meta, logDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/30db5f576be8,43213,1732041173812, archiveDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs, maxLogs=32 2024-11-19T18:32:54,542 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30db5f576be8%2C43213%2C1732041173812.meta.1732041174542.meta 2024-11-19T18:32:54,549 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/30db5f576be8,43213,1732041173812/30db5f576be8%2C43213%2C1732041173812.meta.1732041174542.meta 2024-11-19T18:32:54,552 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42265:42265),(127.0.0.1/127.0.0.1:33147:33147)] 2024-11-19T18:32:54,556 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T18:32:54,557 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
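Illustrative only: a minimal Java sketch of the standard hbase-site.xml properties that would normally produce the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above. The mapping from the logged fields to these well-known keys is an assumption; the log only prints the resolved values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size on HDFS (256 MB in the log entries above).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size = blocksize * multiplier, so 0.5 yields the logged 128 MB roll size.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on un-archived WAL files per server (maxLogs=32 in the log).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("effective WAL roll size (bytes) = " + rollSize);
  }
}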
2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T18:32:54,557 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T18:32:54,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T18:32:54,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T18:32:54,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:54,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:54,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T18:32:54,560 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T18:32:54,560 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:54,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:54,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T18:32:54,561 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T18:32:54,561 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:54,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T18:32:54,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T18:32:54,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T18:32:54,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T18:32:54,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
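Illustrative only: a hedged sketch of the configuration keys that usually drive the CompactionConfiguration values echoed above for each column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, major period 604800000 ms, jitter 0.5). Treating these keys as the source of those exact values is an assumption, not something the log states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000); // 604800000 ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("major compaction period (ms) = "
        + conf.getLong("hbase.hregion.majorcompaction", -1));
  }
}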
2024-11-19T18:32:54,562 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T18:32:54,563 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740 2024-11-19T18:32:54,564 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740 2024-11-19T18:32:54,565 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T18:32:54,565 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T18:32:54,565 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T18:32:54,566 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T18:32:54,567 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736583, jitterRate=-0.06338635087013245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T18:32:54,567 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T18:32:54,567 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732041174557Writing region info on filesystem at 1732041174557Initializing all the Stores at 1732041174558 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041174558Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041174558Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732041174558Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732041174558Cleaning up temporary data from old regions at 1732041174565 (+7 ms)Running coprocessor post-open hooks at 1732041174567 (+2 ms)Region opened successfully at 1732041174567 2024-11-19T18:32:54,568 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732041174535 2024-11-19T18:32:54,570 DEBUG [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T18:32:54,570 INFO [RS_OPEN_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T18:32:54,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,571 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30db5f576be8,43213,1732041173812, state=OPEN 2024-11-19T18:32:54,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:32:54,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T18:32:54,577 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:32:54,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T18:32:54,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T18:32:54,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30db5f576be8,43213,1732041173812 in 194 msec 2024-11-19T18:32:54,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T18:32:54,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-19T18:32:54,582 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T18:32:54,582 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T18:32:54,583 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:32:54,583 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,43213,1732041173812, seqNum=-1] 2024-11-19T18:32:54,583 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:32:54,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54547, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:32:54,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 644 msec 2024-11-19T18:32:54,588 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732041174588, completionTime=-1 2024-11-19T18:32:54,588 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T18:32:54,588 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-19T18:32:54,590 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-19T18:32:54,590 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732041234590 2024-11-19T18:32:54,590 INFO [master/30db5f576be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732041294590 2024-11-19T18:32:54,590 INFO [master/30db5f576be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-19T18:32:54,590 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,591 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,591 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,591 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30db5f576be8:41951, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:32:54,591 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,591 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T18:32:54,592 DEBUG [master/30db5f576be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.752sec 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T18:32:54,594 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T18:32:54,596 DEBUG [master/30db5f576be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T18:32:54,596 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T18:32:54,596 INFO [master/30db5f576be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30db5f576be8,41951,1732041173766-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T18:32:54,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624ed4c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:32:54,625 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30db5f576be8,41951,-1 for getting cluster id 2024-11-19T18:32:54,625 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T18:32:54,626 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fd842d31-08e8-42c4-9297-b9ea40f9aabf' 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fd842d31-08e8-42c4-9297-b9ea40f9aabf" 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770fcd5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30db5f576be8,41951,-1] 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T18:32:54,627 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,628 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48418, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T18:32:54,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a598908, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T18:32:54,629 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T18:32:54,630 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30db5f576be8,43213,1732041173812, seqNum=-1] 2024-11-19T18:32:54,630 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T18:32:54,631 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T18:32:54,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30db5f576be8,41951,1732041173766 2024-11-19T18:32:54,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T18:32:54,634 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T18:32:54,634 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T18:32:54,636 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs, maxLogs=32 2024-11-19T18:32:54,636 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732041174636 2024-11-19T18:32:54,641 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1/test.com%2C8080%2C1.1732041174636 2024-11-19T18:32:54,642 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33147:33147),(127.0.0.1/127.0.0.1:42265:42265)] 2024-11-19T18:32:54,642 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732041174642 2024-11-19T18:32:54,646 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,646 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,647 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1/test.com%2C8080%2C1.1732041174636 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1/test.com%2C8080%2C1.1732041174642 2024-11-19T18:32:54,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42265:42265),(127.0.0.1/127.0.0.1:33147:33147)] 2024-11-19T18:32:54,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1/test.com%2C8080%2C1.1732041174636 is not closed yet, will try archiving it next time 2024-11-19T18:32:54,648 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741835_1011 (size=93) 2024-11-19T18:32:54,648 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741835_1011 (size=93) 2024-11-19T18:32:54,650 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/WALs/test.com,8080,1/test.com%2C8080%2C1.1732041174636 to hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs/test.com%2C8080%2C1.1732041174636 2024-11-19T18:32:54,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741836_1012 (size=93) 2024-11-19T18:32:54,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741836_1012 (size=93) 2024-11-19T18:32:54,652 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs 2024-11-19T18:32:54,653 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732041174642) 2024-11-19T18:32:54,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T18:32:54,653 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T18:32:54,653 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:54,653 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,653 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,653 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T18:32:54,653 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T18:32:54,653 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1956030543, stopped=false 2024-11-19T18:32:54,653 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30db5f576be8,41951,1732041173766 2024-11-19T18:32:54,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:54,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T18:32:54,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:54,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:54,655 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:32:54,655 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
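A minimal sketch of the teardown pattern visible in the call stack above (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster). The TEST_UTIL field name and the startMiniCluster(1) setup are assumptions added for illustration; only the shutdown path is named in the log itself.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
  // Hypothetical field name; the real test class keeps its own utility instance.
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(1); // one master and one region server, as in this run
  }

  @After
  public void tearDown() throws Exception {
    // Closes client connections, stops the region server and master, and shuts
    // down the mini DFS cluster -- the sequence the log records from here on.
    TEST_UTIL.shutdownMiniCluster();
  }
}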
2024-11-19T18:32:54,655 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:54,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,655 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '30db5f576be8,43213,1732041173812' ***** 2024-11-19T18:32:54,655 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T18:32:54,655 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T18:32:54,656 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(959): stopping server 30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30db5f576be8:43213. 2024-11-19T18:32:54,656 DEBUG [RS:0;30db5f576be8:43213 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T18:32:54,656 DEBUG [RS:0;30db5f576be8:43213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T18:32:54,656 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T18:32:54,656 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T18:32:54,657 DEBUG [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T18:32:54,657 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T18:32:54,657 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T18:32:54,657 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T18:32:54,657 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T18:32:54,657 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T18:32:54,657 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T18:32:54,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:54,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T18:32:54,672 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/.tmp/ns/d738ed4347f341d782c6e93e134f690d is 43, key is default/ns:d/1732041174585/Put/seqid=0 2024-11-19T18:32:54,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741837_1013 (size=5153) 2024-11-19T18:32:54,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741837_1013 (size=5153) 2024-11-19T18:32:54,677 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/.tmp/ns/d738ed4347f341d782c6e93e134f690d 2024-11-19T18:32:54,682 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/.tmp/ns/d738ed4347f341d782c6e93e134f690d as 
hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/ns/d738ed4347f341d782c6e93e134f690d 2024-11-19T18:32:54,686 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/ns/d738ed4347f341d782c6e93e134f690d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T18:32:54,687 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-19T18:32:54,691 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T18:32:54,692 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T18:32:54,692 INFO [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T18:32:54,692 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732041174657Running coprocessor pre-close hooks at 1732041174657Disabling compacts and flushes for region at 1732041174657Disabling writes for close at 1732041174657Obtaining lock to block concurrent updates at 1732041174657Preparing flush snapshotting stores in 1588230740 at 1732041174657Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732041174657Flushing stores of hbase:meta,,1.1588230740 at 1732041174658 (+1 ms)Flushing 1588230740/ns: creating writer at 1732041174658Flushing 1588230740/ns: appending metadata at 1732041174672 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732041174672Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@398f7c41: reopening flushed file at 1732041174681 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732041174687 (+6 ms)Writing region close event to WAL at 1732041174688 (+1 ms)Running coprocessor post-close hooks at 1732041174692 (+4 ms)Closed at 1732041174692 2024-11-19T18:32:54,692 DEBUG [RS_CLOSE_META-regionserver/30db5f576be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T18:32:54,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,45935,1732040982421/30db5f576be8%2C45935%2C1732040982421.1732040982738 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:54,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44171/user/jenkins/test-data/402309cc-f25b-58c6-95d8-07a29809aa39/WALs/30db5f576be8,34263,1732040981107/30db5f576be8%2C34263%2C1732040981107.meta.1732040982139.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T18:32:54,857 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(976): stopping server 30db5f576be8,43213,1732041173812; all regions closed. 2024-11-19T18:32:54,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,858 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,858 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741834_1010 (size=1152) 2024-11-19T18:32:54,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741834_1010 (size=1152) 2024-11-19T18:32:54,862 DEBUG [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs 2024-11-19T18:32:54,862 INFO [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C43213%2C1732041173812.meta:.meta(num 1732041174542) 2024-11-19T18:32:54,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741833_1009 (size=93) 2024-11-19T18:32:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741833_1009 (size=93) 2024-11-19T18:32:54,866 DEBUG [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/oldWALs 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30db5f576be8%2C43213%2C1732041173812:(num 1732041174190) 2024-11-19T18:32:54,866 DEBUG [RS:0;30db5f576be8:43213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] hbase.ChoreService(370): Chore service for: regionserver/30db5f576be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:32:54,866 INFO [regionserver/30db5f576be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T18:32:54,866 INFO [RS:0;30db5f576be8:43213 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43213 2024-11-19T18:32:54,868 INFO [RS:0;30db5f576be8:43213 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:32:54,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T18:32:54,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30db5f576be8,43213,1732041173812 2024-11-19T18:32:54,871 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30db5f576be8,43213,1732041173812] 2024-11-19T18:32:54,872 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30db5f576be8,43213,1732041173812 already deleted, retry=false 2024-11-19T18:32:54,872 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30db5f576be8,43213,1732041173812 expired; onlineServers=0 2024-11-19T18:32:54,873 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30db5f576be8,41951,1732041173766' ***** 2024-11-19T18:32:54,873 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T18:32:54,873 DEBUG [M:0;30db5f576be8:41951 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T18:32:54,873 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T18:32:54,873 DEBUG [M:0;30db5f576be8:41951 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T18:32:54,873 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041173949 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.large.0-1732041173949,5,FailOnTimeoutGroup] 2024-11-19T18:32:54,873 DEBUG [master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041173949 {}] cleaner.HFileCleaner(306): Exit Thread[master/30db5f576be8:0:becomeActiveMaster-HFileCleaner.small.0-1732041173949,5,FailOnTimeoutGroup] 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] hbase.ChoreService(370): Chore service for: master/30db5f576be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T18:32:54,873 DEBUG [M:0;30db5f576be8:41951 {}] master.HMaster(1795): Stopping service threads 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T18:32:54,873 INFO [M:0;30db5f576be8:41951 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T18:32:54,873 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T18:32:54,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T18:32:54,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T18:32:54,874 DEBUG [M:0;30db5f576be8:41951 {}] zookeeper.ZKUtil(347): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T18:32:54,874 WARN [M:0;30db5f576be8:41951 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T18:32:54,875 INFO [M:0;30db5f576be8:41951 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/.lastflushedseqids 2024-11-19T18:32:54,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741838_1014 (size=99) 2024-11-19T18:32:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741838_1014 (size=99) 2024-11-19T18:32:54,880 INFO [M:0;30db5f576be8:41951 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T18:32:54,880 INFO [M:0;30db5f576be8:41951 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T18:32:54,880 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T18:32:54,880 INFO [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:54,880 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:54,881 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T18:32:54,881 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T18:32:54,881 INFO [M:0;30db5f576be8:41951 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T18:32:54,896 DEBUG [M:0;30db5f576be8:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6529ecde9184daea2cb7c654b732a85 is 82, key is hbase:meta,,1/info:regioninfo/1732041174570/Put/seqid=0 2024-11-19T18:32:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741839_1015 (size=5672) 2024-11-19T18:32:54,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741839_1015 (size=5672) 2024-11-19T18:32:54,901 INFO [M:0;30db5f576be8:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6529ecde9184daea2cb7c654b732a85 2024-11-19T18:32:54,919 DEBUG [M:0;30db5f576be8:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eaafef7f2b334d2ea6afe86d82dbe3a5 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732041174588/Put/seqid=0 2024-11-19T18:32:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741840_1016 (size=5275) 2024-11-19T18:32:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741840_1016 (size=5275) 2024-11-19T18:32:54,923 INFO [M:0;30db5f576be8:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eaafef7f2b334d2ea6afe86d82dbe3a5 2024-11-19T18:32:54,941 DEBUG [M:0;30db5f576be8:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d505943131054d14ad78eb0d6364344f is 69, key is 30db5f576be8,43213,1732041173812/rs:state/1732041174046/Put/seqid=0 2024-11-19T18:32:54,945 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741841_1017 (size=5156) 2024-11-19T18:32:54,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741841_1017 (size=5156) 2024-11-19T18:32:54,946 INFO [M:0;30db5f576be8:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d505943131054d14ad78eb0d6364344f 2024-11-19T18:32:54,963 DEBUG [M:0;30db5f576be8:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae2eaec413432eb29931b9b426e252 is 52, key is load_balancer_on/state:d/1732041174634/Put/seqid=0 2024-11-19T18:32:54,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741842_1018 (size=5056) 2024-11-19T18:32:54,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741842_1018 (size=5056) 2024-11-19T18:32:54,967 INFO [M:0;30db5f576be8:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae2eaec413432eb29931b9b426e252 2024-11-19T18:32:54,971 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6529ecde9184daea2cb7c654b732a85 as hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b6529ecde9184daea2cb7c654b732a85 2024-11-19T18:32:54,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:54,972 INFO [RS:0;30db5f576be8:43213 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:32:54,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43213-0x1013180d5880001, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:54,972 INFO [RS:0;30db5f576be8:43213 {}] regionserver.HRegionServer(1031): Exiting; stopping=30db5f576be8,43213,1732041173812; zookeeper connection closed. 
2024-11-19T18:32:54,972 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@395f0626 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@395f0626 2024-11-19T18:32:54,972 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T18:32:54,975 INFO [M:0;30db5f576be8:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b6529ecde9184daea2cb7c654b732a85, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T18:32:54,976 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eaafef7f2b334d2ea6afe86d82dbe3a5 as hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eaafef7f2b334d2ea6afe86d82dbe3a5 2024-11-19T18:32:54,980 INFO [M:0;30db5f576be8:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eaafef7f2b334d2ea6afe86d82dbe3a5, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T18:32:54,980 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d505943131054d14ad78eb0d6364344f as hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d505943131054d14ad78eb0d6364344f 2024-11-19T18:32:54,984 INFO [M:0;30db5f576be8:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d505943131054d14ad78eb0d6364344f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T18:32:54,985 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae2eaec413432eb29931b9b426e252 as hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5aae2eaec413432eb29931b9b426e252 2024-11-19T18:32:54,988 INFO [M:0;30db5f576be8:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46723/user/jenkins/test-data/aef8ab6d-b312-4fb7-7551-9f0a17371f38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5aae2eaec413432eb29931b9b426e252, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T18:32:54,989 INFO [M:0;30db5f576be8:41951 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false 2024-11-19T18:32:54,990 INFO [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T18:32:54,990 DEBUG [M:0;30db5f576be8:41951 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732041174880Disabling compacts and flushes for region at 1732041174880Disabling writes for close at 1732041174881 (+1 ms)Obtaining lock to block concurrent updates at 1732041174881Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732041174881Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732041174881Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732041174882 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732041174882Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732041174896 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732041174896Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732041174905 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732041174918 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732041174919 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732041174927 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732041174941 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732041174941Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732041174949 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732041174963 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732041174963Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@545306f9: reopening flushed file at 1732041174971 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26b2dc99: reopening flushed file at 1732041174975 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@532fa445: reopening flushed file at 1732041174980 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@271366cc: reopening flushed file at 1732041174984 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false at 1732041174989 (+5 ms)Writing region close event to WAL at 1732041174990 (+1 ms)Closed at 1732041174990 2024-11-19T18:32:54,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T18:32:54,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741830_1006 (size=10311) 2024-11-19T18:32:54,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40547 is added to blk_1073741830_1006 (size=10311) 2024-11-19T18:32:54,993 INFO [M:0;30db5f576be8:41951 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T18:32:54,993 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T18:32:54,993 INFO [M:0;30db5f576be8:41951 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41951 2024-11-19T18:32:54,994 INFO [M:0;30db5f576be8:41951 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T18:32:55,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:55,095 INFO [M:0;30db5f576be8:41951 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T18:32:55,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1013180d5880000, quorum=127.0.0.1:58657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T18:32:55,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32c86c86{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:55,098 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a7b167c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:55,098 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:55,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35c58925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:55,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380ffe40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:55,100 WARN [BP-1663235635-172.17.0.2-1732041173019 heartbeating to localhost/127.0.0.1:46723 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:32:55,100 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:32:55,100 WARN [BP-1663235635-172.17.0.2-1732041173019 heartbeating to localhost/127.0.0.1:46723 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1663235635-172.17.0.2-1732041173019 (Datanode Uuid 5125daaa-f034-42a9-ba64-0836715e6119) service to localhost/127.0.0.1:46723 2024-11-19T18:32:55,100 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:32:55,100 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data3/current/BP-1663235635-172.17.0.2-1732041173019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:55,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data4/current/BP-1663235635-172.17.0.2-1732041173019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:55,101 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:32:55,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41f14207{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T18:32:55,103 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a2c3a40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:55,103 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:55,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47f122ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:55,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@150dab73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:55,104 WARN [BP-1663235635-172.17.0.2-1732041173019 heartbeating to localhost/127.0.0.1:46723 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T18:32:55,104 WARN [BP-1663235635-172.17.0.2-1732041173019 heartbeating to localhost/127.0.0.1:46723 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1663235635-172.17.0.2-1732041173019 (Datanode Uuid c930c368-be52-418f-9748-4ee4c706a9e2) service to localhost/127.0.0.1:46723 2024-11-19T18:32:55,104 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T18:32:55,104 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T18:32:55,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data1/current/BP-1663235635-172.17.0.2-1732041173019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:55,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/cluster_cd1d3d2b-8391-25a3-3cb5-dd6acbf6b24b/data/data2/current/BP-1663235635-172.17.0.2-1732041173019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T18:32:55,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T18:32:55,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72d15c59{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T18:32:55,113 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55be9e2c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T18:32:55,113 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T18:32:55,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a96c77e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T18:32:55,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134d2ab8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43ed1271-4868-0db6-eae0-fc0c2d279db9/hadoop.log.dir/,STOPPED} 2024-11-19T18:32:55,121 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T18:32:55,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T18:32:55,143 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 230) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46723 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46723 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46723 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46723 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46723 from jenkins java.base@17.0.11/java.lang.Object.wait(Native 
Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=21 (was 21), ProcessCount=11 (was 11), AvailableMemoryMB=6505 (was 6512)